| column | type | values |
|---|---|---|
| code | string | lengths 86 to 54.5k |
| code_codestyle | int64 | 0 to 371 |
| style_context | string | lengths 87 to 49.2k |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |

---
"""simple docstring"""
import unittest
from transformers import DonutProcessor
UpperCAmelCase_ : str = 'naver-clova-ix/donut-base'
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = DonutProcessor.from_pretrained(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
SCREAMING_SNAKE_CASE_ : Optional[int] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
SCREAMING_SNAKE_CASE_ : Any = self.processor.tokenajson(lowercase_)
self.assertDictEqual(lowercase_ , lowercase_)
---
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ):
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(UpperCamelCase__ ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
def lowerCamelCase_ ():
_UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423]
_UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
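A quick hand-check of the recursion: with four leaves, the maximizer at the root picks the better of the two minimizer subtrees, max(min(3, 5), min(2, 9)) = max(3, 2) = 3. A minimal sketch using the function above:

```python
# Hand-verifiable example: a complete binary tree with 4 leaves (height = 2).
scores = [3, 5, 2, 9]
height = math.log(len(scores), 2)  # 2.0
assert minimax(0, 0, True, scores, height) == 3
```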
---
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( _lowercase , _lowercase , _lowercase ):
# Initialise PyTorch model
_UpperCamelCase : List[Any] = MobileBertConfig.from_json_file(_lowercase )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCamelCase : List[str] = MobileBertForPreTraining(_lowercase )
# Load weights from tf checkpoint
_UpperCamelCase : Union[str, Any] = load_tf_weights_in_mobilebert(_lowercase , _lowercase , _lowercase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowercase )
if __name__ == "__main__":
UpperCamelCase_ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase_ =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
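For reference, a minimal programmatic invocation of the converter; the paths below are hypothetical placeholders for illustration, not files shipped with the library:

```python
# Hypothetical paths for illustration only; point these at a real TF checkpoint.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="mobilebert/mobilebert_model.ckpt",  # assumed checkpoint prefix
    mobilebert_config_file="mobilebert/config.json",        # assumed config location
    pytorch_dump_path="mobilebert/pytorch_model.bin",
)
```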
---
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _a ( unittest.TestCase ):
def snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
_UpperCamelCase : int = tempfile.mkdtemp()
_UpperCamelCase : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCamelCase : Dict = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'''do_convert_rgb''': True,
}
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname, lowerCAmelCase__ )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase__, lowerCAmelCase__ )
def snake_case ( self : str, **lowerCAmelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__ )
def snake_case ( self : Union[str, Any], **lowerCAmelCase__ : Tuple ) -> str:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__ )
def snake_case ( self : Any, **lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__ )
def snake_case ( self : str ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase : List[str] = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uinta )]
_UpperCamelCase : List[Any] = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : str ) -> Any:
'''simple docstring'''
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : int = self.get_rust_tokenizer()
_UpperCamelCase : int = self.get_image_processor()
_UpperCamelCase : Tuple = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCamelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCamelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__ )
def snake_case ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase : Dict = self.get_tokenizer(cls_token='''(CLS)''', sep_token='''(SEP)''' )
_UpperCamelCase : List[str] = self.get_image_processor(do_normalize=lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname, cls_token='''(CLS)''', sep_token='''(SEP)''', do_normalize=lowerCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCAmelCase__ )
def snake_case ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[str] = self.get_image_processor()
_UpperCamelCase : str = self.get_tokenizer()
_UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : List[str] = self.prepare_image_inputs()
_UpperCamelCase : Any = image_processor(lowerCAmelCase__, return_tensors='''np''' )
_UpperCamelCase : Any = processor(images=lowerCAmelCase__, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def snake_case ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = self.get_image_processor()
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
_UpperCamelCase : Any = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : Tuple = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase : List[str] = processor(text=lowerCAmelCase__ )
_UpperCamelCase : Any = tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def snake_case ( self : Dict ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Tuple = self.get_image_processor()
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
_UpperCamelCase : Dict = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : Any = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCamelCase : str = processor(text=lowerCAmelCase__, images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def snake_case ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : int = self.get_image_processor()
_UpperCamelCase : int = self.get_tokenizer()
_UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase : List[Any] = processor.batch_decode(lowerCAmelCase__ )
_UpperCamelCase : Dict = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
def snake_case ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Any = self.get_image_processor()
_UpperCamelCase : Optional[int] = self.get_tokenizer()
_UpperCamelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__ )
_UpperCamelCase : Any = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase : int = self.prepare_image_inputs()
_UpperCamelCase : Dict = processor(text=lowerCAmelCase__, images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
---
import datasets
from sklearn.metrics import matthews_corrcoef


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54
    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1
    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
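As a cross-check on what the wrapped sklearn call computes, the binary-case MCC can be written directly from confusion-matrix counts; a minimal sketch (the helper name here is ours, not part of the metric):

```python
import math

def binary_mcc(tp: int, tn: int, fp: int, fn: int) -> float:
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))
    denom = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0

assert binary_mcc(tp=5, tn=5, fp=0, fn=0) == 1.0   # perfect prediction
assert binary_mcc(tp=0, tn=0, fp=5, fn=5) == -1.0  # inverse prediction
```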
---
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """
    Simulated annealing: a probabilistic local search that occasionally accepts
    worse neighbors, with decreasing probability as the temperature cools, so it
    can escape local optima.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum

            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor

        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
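The acceptance rule above is the standard Metropolis criterion: a worsening move with `change < 0` is accepted with probability e^(change / temperature), so worse moves are taken often while the system is hot and rarely once it has cooled. A quick numeric illustration:

```python
import math

change = -2.0  # a slightly worse neighbor
print(math.e ** (change / 100))  # ~0.980 at temperature 100: almost always accepted
print(math.e ** (change / 1))    # ~0.135 at temperature 1: usually rejected
```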
---
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if the string can be segmented into a space-separated sequence
    of one or more words from the list.

    >>> word_break("applepenapple", ["apple", "pen"])
    True
    >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
    False
    >>> word_break("cars", ["car", "ca", "rs"])
    True
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}

            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
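The trie built inside `word_break` is just nested dicts, with the `WORD_KEEPER` key marking where a complete word ends; words with a shared prefix share a path. A standalone sketch of that shape (separate from the function above):

```python
trie: dict = {}
for word in ["car", "card"]:
    node = trie
    for c in word:
        node = node.setdefault(c, {})
    node["WORD_KEEPER"] = True

# "car" and "card" share the c -> a -> r path; both end markers are present.
assert trie["c"]["a"]["r"]["WORD_KEEPER"]
assert trie["c"]["a"]["r"]["d"]["WORD_KEEPER"]
```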
---
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
---
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 42
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE: maps each spatial feature to its
    nearest codebook entry and allows post-hoc remapping of indices.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
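`DiagonalGaussianDistribution.sample` uses the reparameterization trick: a draw is `mean + std * eps` with `eps ~ N(0, I)`, which keeps the sample differentiable with respect to the predicted `mean` and `logvar`. A standalone sketch of the same idea:

```python
import torch

mean = torch.zeros(2, 4)
logvar = torch.zeros(2, 4, requires_grad=True)
std = torch.exp(0.5 * logvar)

eps = torch.randn_like(mean)   # the randomness carries no parameters
sample = mean + std * eps      # differentiable in mean and logvar

sample.sum().backward()
assert logvar.grad is not None  # gradients flow through the draw
```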
---
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
_SCREAMING_SNAKE_CASE = Features({'text': Value('string' )} )
_SCREAMING_SNAKE_CASE = Features({} )
_SCREAMING_SNAKE_CASE = "text"
@property
def _snake_case ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
---
"""simple docstring"""
import re
import string
import numpy as np
import datasets
a_ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
a_ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
a_ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class snake_case ( datasets.Metric):
def a_ ( self : int ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def a_ ( self : List[str] , a__ : Optional[Any] , a__ : int , a__ : Dict=None , a__ : int=False , a__ : str=False , a__ : Union[str, Any]=False , ) -> Dict:
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_A = np.array([re.sub(a__ , "" , a__ ) for x in predictions] )
_A = np.array([re.sub(a__ , "" , a__ ) for x in references] )
else:
_A = np.asarray(a__ )
_A = np.asarray(a__ )
if ignore_case:
_A = np.char.lower(a__ )
_A = np.char.lower(a__ )
if ignore_punctuation:
_A = string.punctuation.maketrans("" , "" , string.punctuation )
_A = np.char.translate(a__ , table=a__ )
_A = np.char.translate(a__ , table=a__ )
if ignore_numbers:
_A = string.digits.maketrans("" , "" , string.digits )
_A = np.char.translate(a__ , table=a__ )
_A = np.char.translate(a__ , table=a__ )
_A = predictions == references
return {"exact_match": np.mean(a__ ) * 1_00}
---
"""simple docstring"""
def a__ ( __lowercase=2_8123 ) -> List[Any]:
_A = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
_A = set()
_A = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(__lowercase )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
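A sanity check of the divisor-sum idea on a single number: the proper divisors of 12 are 1, 2, 3, 4, 6, which sum to 16 > 12, so 12 is abundant (in fact the smallest abundant number):

```python
n = 12
divisor_sum = sum(d for d in range(1, n) if n % d == 0)
assert divisor_sum == 16 and divisor_sum > n
```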
---
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = (UniPCMultistepScheduler,)
UpperCamelCase = (('''num_inference_steps''', 2_5),)
def lowerCamelCase ( self :Dict , **__UpperCamelCase :Dict ):
A = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Any]=0 , **__UpperCamelCase :int ):
A = dict(self.forward_default_kwargs )
A = kwargs.pop("num_inference_steps" , __UpperCamelCase )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config(**__UpperCamelCase )
A = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
A = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
A = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
A = dummy_past_residuals[: new_scheduler.config.solver_order]
A, A = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
A = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :List[str]=0 , **__UpperCamelCase :Any ):
A = dict(self.forward_default_kwargs )
A = kwargs.pop("num_inference_steps" , __UpperCamelCase )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config()
A = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
A = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A = dummy_past_residuals[: new_scheduler.config.solver_order]
A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
A = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Tuple=None , **__UpperCamelCase :Dict ):
if scheduler is None:
A = self.scheduler_classes[0]
A = self.get_scheduler_config(**__UpperCamelCase )
A = scheduler_class(**__UpperCamelCase )
A = self.scheduler_classes[0]
A = self.get_scheduler_config(**__UpperCamelCase )
A = scheduler_class(**__UpperCamelCase )
A = 10
A = self.dummy_model()
A = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A = model(__UpperCamelCase , __UpperCamelCase )
A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def lowerCamelCase ( self :Any ):
A = dict(self.forward_default_kwargs )
A = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config()
A = scheduler_class(**__UpperCamelCase )
A = self.dummy_sample
A = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
A = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A = [residual + 0.2, residual + 0.15, residual + 0.10]
A = dummy_past_residuals[: scheduler.config.solver_order]
A = scheduler.timesteps[5]
A = scheduler.timesteps[6]
A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase ( self :str ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A = UniPCMultistepScheduler(**self.get_scheduler_config() )
A = self.full_loop(scheduler=__UpperCamelCase )
A = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
A = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A = DEISMultistepScheduler.from_config(scheduler.config )
A = DPMSolverMultistepScheduler.from_config(scheduler.config )
A = UniPCMultistepScheduler.from_config(scheduler.config )
A = self.full_loop(scheduler=__UpperCamelCase )
A = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def lowerCamelCase ( self :List[Any] ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def lowerCamelCase ( self :Dict ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
A = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def lowerCamelCase ( self :Union[str, Any] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def lowerCamelCase ( self :Optional[int] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def lowerCamelCase ( self :Optional[int] ):
A = self.full_loop()
A = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def lowerCamelCase ( self :Tuple ):
A = self.full_loop(prediction_type="v_prediction" )
A = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
def lowerCamelCase ( self :str ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
A = scheduler_class(**__UpperCamelCase )
A = 10
A = self.dummy_model()
A = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A = model(__UpperCamelCase , __UpperCamelCase )
A = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def lowerCamelCase ( self :str , **__UpperCamelCase :Optional[int] ):
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config(**__UpperCamelCase )
A = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
---
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel values and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
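
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original test module): the sequence-length
# arithmetic the tester above relies on. For a ViT-style model the token
# sequence is one [CLS] token plus one token per (patch_size x patch_size)
# image patch.
def _vit_sequence_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1


# With the tester defaults (image_size=30, patch_size=2): 15 * 15 patches + [CLS]
assert _vit_sequence_length(30, 2) == 226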
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
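
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original test module): what semantic
# segmentation post-processing boils down to, using only plain torch ops.
# The real logic lives in BeitImageProcessor.post_process_semantic_segmentation;
# this simplified stand-in is not that implementation.
def _postprocess_segmentation_sketch(logits, target_size):
    import torch

    # upsample the coarse (batch, num_labels, h, w) logits to the target size
    resized = torch.nn.functional.interpolate(logits, size=target_size, mode="bilinear", align_corners=False)
    # per-pixel argmax over the label dimension gives a (batch, H, W) label map
    return resized.argmax(dim=1)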
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
                'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
                'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
            }
            # fmt: on

            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
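
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original test module): a self-contained
# teaching version of the greedy BPE merge step that the toy vocab/merges
# written in setUp() exercise. It is not the tokenizer's actual implementation.
def _apply_bpe_merges(word, merges):
    # map each mergeable symbol pair to its priority (lower rank merges first)
    ranks = {tuple(m.split()): i for i, m in enumerate(merges) if m and not m.startswith("#")}
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(ranks.get((a, b), float("inf")), i) for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs)
        if best_rank == float("inf"):
            break  # no applicable merge left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols


# Only the "e r" merge applies to a word without the leading-space marker (\u0120),
# so "lower" becomes ["l", "o", "w", "er"], matching test_full_tokenizer above.
assert _apply_bpe_merges("lower", ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]) == ["l", "o", "w", "er"]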
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using the union-by-rank heuristic.
        Returns True if the merge happened, False if both elements
        were already in the same set.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
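
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not from the original module) for the
# union-by-rank structure above.
if __name__ == "__main__":
    ds = DisjointSet([1, 2, 3])
    assert ds.merge(0, 1) is True   # sets of size 1 and 2 join into one of size 3
    assert ds.max_set == 3
    assert ds.merge(1, 2) is True   # everything joins into one set of size 6
    assert ds.max_set == 6
    assert ds.merge(0, 2) is False  # already in the same set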
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
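
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original test module): BertGeneration
# encoder/decoder halves are typically paired into one seq2seq model via
# transformers' EncoderDecoderModel. Reusing the same BBC checkpoint for both
# halves is an assumption made only for illustration, and running this
# requires downloading the weights.
def _bert_generation_seq2seq_sketch():
    from transformers import EncoderDecoderModel

    return EncoderDecoderModel.from_encoder_decoder_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder",
        "google/bert_for_seq_generation_L-24_bbc_encoder",
    )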
'''simple docstring'''
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : an empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Returns the image with corners identified
        img_path : path of the image
        output : list of the corner positions, image
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
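
# ---------------------------------------------------------------------------
# Illustrative note (not from the original module): the corner response
# computed above is the standard Harris measure R = det(M) - k * trace(M)^2
# for the 2x2 structure tensor
# M = [[sum(Ix^2), sum(Ix*Iy)], [sum(Ix*Iy), sum(Iy^2)]] over the window.
# A tiny self-contained check of that formula:
def _harris_response(wxx: float, wyy: float, wxy: float, k: float = 0.04) -> float:
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2


assert abs(_harris_response(2.0, 3.0, 1.0) - 4.0) < 1e-12  # det=5, trace=5, 5 - 0.04*25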
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
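
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not from the original module): composing the full
# config from its two sub-configs. All values are the defaults, so this is
# equivalent to Pix2StructConfig().
if __name__ == "__main__":
    text_config = Pix2StructTextConfig()
    vision_config = Pix2StructVisionConfig()
    config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
    print(config.text_config.hidden_size, config.vision_config.hidden_size)  # 768 768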
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowerCAmelCase_ : str = self.scheduler.scale_model_input(__lowercase , __lowercase )
# predict the noise residual
lowerCAmelCase_ : Any = self.unet(__lowercase , __lowercase , encoder_hidden_states=__lowercase ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase_ : Any = noise_pred.chunk(2 )
lowerCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ : Dict = self.scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowercase , __lowercase , __lowercase )
lowerCAmelCase_ : Union[str, Any] = 1 / 0.1_82_15 * latents
lowerCAmelCase_ : Tuple = self.vae.decode(__lowercase ).sample
lowerCAmelCase_ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCAmelCase_ : int = self.feature_extractor(self.numpy_to_pil(__lowercase ) , return_tensors='''pt''' ).to(
self.device )
lowerCAmelCase_ : int = self.safety_checker(
images=__lowercase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCAmelCase_ : Any = None
if output_type == "pil":
lowerCAmelCase_ : int = self.numpy_to_pil(__lowercase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__lowercase , nsfw_content_detected=__lowercase )
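# --- Illustrative sketch (not part of the pipeline above): how classifier-free
# guidance combines the two noise predictions produced in the denoising loop.
# Tensor shapes and the guidance_scale value are made-up assumptions for the demo.
import torch

noise_pred_uncond = torch.zeros(1, 4, 8, 8)  # prediction for the empty prompt
noise_pred_text = torch.ones(1, 4, 8, 8)     # prediction for the conditioned prompt
guidance_scale = 7.5

# guidance_scale = 1 reproduces the conditional prediction; larger values push the
# sample further along the text-conditioned direction (Imagen paper, eq. (2)).
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(noise_pred, torch.full((1, 4, 8, 8), 7.5))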
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
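# --- Minimal sketch of the lazy-import pattern used above. This is an
# assumption-level re-implementation for illustration, not the real
# transformers `_LazyModule`: attribute access triggers the actual submodule
# import the first time an exported symbol is needed.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # only called when the attribute is not yet set on the module
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache so the import happens only once
        return value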
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
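# --- Hedged usage sketch for the ONNX config above (an assumption-level example,
# not the canonical export recipe). "Helsinki-NLP/opus-mt-en-de" is a real Marian
# checkpoint, but running this requires network access and PyTorch.
def _example_marian_onnx_dummy_inputs():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
    dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    # e.g. input_ids, attention_mask, decoder_input_ids, decoder_attention_mask
    return {name: tuple(tensor.shape) for name, tensor in dummy_inputs.items()}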
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least n for which the fill-count function of Project Euler
    problem 115 first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
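# --- Standalone sketch of the cross-framework tolerance check the mixin above
# relies on: compare two outputs by their maximum absolute difference. The
# values here are invented purely for illustration.
import numpy as np

pt_output = np.array([0.1001, -0.4999, 0.2500])  # pretend PyTorch result
fx_output = np.array([0.1000, -0.5000, 0.2501])  # pretend Flax result

max_diff = np.abs(fx_output - pt_output).max()
assert max_diff <= 4e-2, f"Difference between torch and flax is {max_diff} (>= 4e-2)."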
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
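# --- Hedged usage sketch for the processor above; not exercised anywhere in this
# file and it requires network access plus Pillow/requests.
# "Salesforce/instructblip-flan-t5-xl" is a published InstructBLIP checkpoint.
def _example_instructblip_processor_usage():
    import requests
    from PIL import Image

    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
    # three groups of inputs come back: pixel_values, input_ids/attention_mask for
    # the language model, and qformer_input_ids/qformer_attention_mask for the Q-Former
    return sorted(inputs.keys())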
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
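# --- Toy sketch of what semantic-segmentation post-processing does at its core:
# upsample the (num_labels, h, w) logits to the target size, then take the argmax
# over the class axis to get per-pixel class ids. Shapes are invented for the demo
# and this is not the library's actual implementation.
import torch
import torch.nn.functional as F

logits = torch.randn(1, 150, 160, 160)  # (batch, num_labels, h, w)
target_size = (500, 300)

upsampled = F.interpolate(logits, size=target_size, mode="bilinear", align_corners=False)
segmentation = upsampled.argmax(dim=1)[0]  # per-pixel class ids
assert segmentation.shape == torch.Size(target_size)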
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.')
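    # Illustrative note (not in the original file): reading `max_position_embeddings`
    # on an instance of the config above always returns -1, since Transformer-XL has
    # no hard sequence-length limit thanks to its relative position embeddings and
    # memory, and assigning to the property raises NotImplementedError.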
| 334
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 46
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 46
| 1
|
from ..utils import DummyObject, requires_backends
class __lowerCamelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["keras_nlp"])
| 39
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
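# Minimal sketch (not part of the original script) of the slicing used above: a fused
# QKV projection of shape (3 * hidden_size, hidden_size) splits into query, key and
# value blocks, in that order.
def _demo_split_qkv(hidden_size: int = 4):
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = fused[:hidden_size, :]  # first hidden_size rows
    key = fused[hidden_size : hidden_size * 2, :]  # middle hidden_size rows
    value = fused[-hidden_size:, :]  # last hidden_size rows
    # concatenating the three blocks recovers the fused matrix
    assert torch.equal(torch.cat([query, key, value], dim=0), fused)
    return query, key, value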
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
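# Quick sanity check (hypothetical key, illustration only) of how the rules above
# cascade: "backbone" -> "vit", "blocks" -> "encoder.layer", "attn.proj" ->
# "attention.output.dense".
assert rename_key("backbone.blocks.0.attn.proj.weight") == "vit.encoder.layer.0.attention.output.dense.weight"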
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 311
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions x_num/x_den + y_num/y_den + z_num/z_den and reduce the result."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
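# Worked example (illustration only): 1/2 + 1/3 + 1/6 = 1, so
# add_three(1, 2, 1, 3, 1, 6) returns (1, 1)  (top = 36, bottom = 36, hcf = 36)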
def solution(order: int = 35) -> int:
    """
    Collect the unique reduced sums s(x, y, z) for n in {1, 2, -1, -2} with
    0 < x < y < z <= order, total them as exact fractions, and return the sum of the
    numerator and denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 371
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
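# Minimal usage sketch (not part of the original file; the checkpoint name is just an
# example):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
#   # -> [{"score": ..., "label": ...}, ...], sorted by descending score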
| 32
| 0
|
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs) -> Optional[Any]:
            pass
@is_pipeline_test
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@require_torch
def __a ( self ) -> Dict:
a : List[Any] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
a : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : List[str] = image_classifier(lowerCAmelCase__ , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowerCAmelCase__ ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
a : Optional[int] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
] , )
@require_tf
def __a ( self ) -> int:
a : Tuple = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
a : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : int = image_classifier(lowerCAmelCase__ , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
a : List[str] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
] , )
@slow
@require_torch
def __a ( self ) -> Union[str, Any]:
a : Optional[int] = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
a : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : str = image_classifier(lowerCAmelCase__ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
a : List[str] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __a ( self ) -> Optional[Any]:
a : List[str] = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
a : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : Optional[Any] = image_classifier(lowerCAmelCase__ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
a : Optional[int] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 105
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
"""simple docstring"""
__lowerCamelCase = BlenderbotSmallConfig
__lowerCamelCase = {}
__lowerCamelCase = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def UpperCamelCase ( self , lowercase , lowercase ) -> str:
'''simple docstring'''
A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder()
A__ = inputs_dict["input_ids"]
A__ = input_ids[:1, :]
A__ = inputs_dict["attention_mask"][:1, :]
A__ = inputs_dict["head_mask"]
A__ = 1
# first forward pass
A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
A__ , A__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(lowercase , attention_mask=lowercase )[0]
A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = TFBlenderbotSmallModelTester(self )
A__ = ConfigTester(self , config_class=lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_tokenizers
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__lowerCamelCase = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.tokenizer(self.src_text , return_tensors="tf" )
A__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 68
| 0
|
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


_a = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
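# Minimal sketch (not part of the original file) of the per-sample selection rule used
# in batch_decode above: keep the string from whichever head (char / BPE / wordpiece)
# produced the highest cumulative-probability score.
def _pick_best(strs, scores):
    best = max(range(len(scores)), key=scores.__getitem__)
    return strs[best], scores[best]
# e.g. _pick_best(["abc", "ab", "abc"], [0.91, 0.40, 0.87]) -> ("abc", 0.91)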
| 100
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    """Check that `source` is within 1% of `target`."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 100
| 1
|
import qiskit
def quantum_entanglement(qubits: int = 2):
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
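# Illustrative note (not in the original file): the H gate plus the chain of CX gates
# prepares a GHZ state, so on an ideal simulator the 1000 shots split roughly evenly
# between only two outcomes, e.g. {'000': 497, '111': 503} for three qubits.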
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 94
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
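# Illustrative trace (not in the original file), using the example graph G below with
# start "A": vertices are first explored in the order A, B, E, F, C, G, D, and the
# function returns the set of all reachable vertices.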
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 94
| 1
|
from __future__ import annotations
def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
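# Worked example (illustration only): with values [60, 100, 120], weights [10, 20, 30]
# and capacity 50, the ratio order is item 0 (6.0), item 1 (5.0), item 2 (4.0); items 0
# and 1 fit whole and 20/30 of item 2 is taken:
# fractional_knapsack([60, 100, 120], [10, 20, 30], 50) -> (240.0, [1, 1, 0.666...])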
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
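# Worked example (illustration only): "Silent" and "Listen" normalise to the same
# character counts, so check_anagrams("Silent", "Listen") -> True, while
# check_anagrams("abc", "abd") -> False.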
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 47
| 0
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _snake_case ( self ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ) -> Dict:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
lowerCAmelCase = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCAmelCase = BioGptForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Optional[Any]:
lowerCAmelCase = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
# create attention mask
lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
lowerCAmelCase = self.seq_length // 2
lowerCAmelCase = 0
# first forward pass
lowerCAmelCase , lowerCAmelCase = model(lowercase , attention_mask=lowercase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCAmelCase = ids_tensor((1,) , lowercase ).item() + 1
lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase )] , dim=1 , )
# get two different outputs
lowerCAmelCase = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""]
lowerCAmelCase = model(lowercase , past_key_values=lowercase , attention_mask=lowercase )["""last_hidden_state"""]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Optional[Any]:
lowerCAmelCase = BioGptModel(config=lowercase ).to(lowercase ).eval()
lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
# first forward pass
lowerCAmelCase = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
lowerCAmelCase , lowerCAmelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""]
lowerCAmelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[
"""last_hidden_state"""
]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ) -> Optional[Any]:
lowerCAmelCase = BioGptForCausalLM(lowercase )
model.to(lowercase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _snake_case ( self , lowercase , *lowercase ) -> List[str]:
lowerCAmelCase = BioGptModel(lowercase )
lowerCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Optional[int]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = BioGptForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels) = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = BioGptModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def _snake_case ( self ) -> Any:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase , gradient_checkpointing=lowercase )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase )
@slow
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(lowercase )
lowerCAmelCase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCAmelCase = """left"""
# Define PAD Token = EOS Token = 50256
lowerCAmelCase = tokenizer.eos_token
lowerCAmelCase = model.config.eos_token_id
# use different length sentences to test batching
lowerCAmelCase = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCAmelCase = tokenizer(lowercase , return_tensors="""pt""" , padding=lowercase )
lowerCAmelCase = inputs["""input_ids"""].to(lowercase )
lowerCAmelCase = model.generate(
input_ids=lowercase , attention_mask=inputs["""attention_mask"""].to(lowercase ) , )
lowerCAmelCase = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(lowercase )
lowerCAmelCase = model.generate(input_ids=lowercase )
lowerCAmelCase = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
lowerCAmelCase = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(lowercase )
lowerCAmelCase = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
lowerCAmelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
lowerCAmelCase = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
@slow
def _snake_case ( self ) -> Optional[int]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = BioGptModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict["""input_ids"""]
lowerCAmelCase = input_ids.ne(1 ).to(lowercase )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = """multi_label_classification"""
lowerCAmelCase = input_dict["""input_ids"""]
lowerCAmelCase = input_ids.ne(1 ).to(lowercase )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
lowerCAmelCase = torch.tensor([[2, 4_805, 9, 656, 21]] )
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = 42_384
lowerCAmelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
@slow
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(lowercase )
torch.manual_seed(0 )
lowerCAmelCase = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(lowercase )
lowerCAmelCase = model.generate(
**lowercase , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=lowercase , )
lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase )
lowerCAmelCase = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(lowercase , lowercase )
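# --- Illustrative sketch, not part of the original test file ---
# Every cache test above follows one pattern: run the decoder over the full
# sequence, run it again incrementally through `past_key_values`, and require
# the hidden states at the final position to agree. A minimal, self-contained
# version of that pattern (the helper name is made up):
import torch


def check_past_key_values_consistency(model, input_ids, atol=1e-3):
    """Compare a full forward pass against a cached incremental one."""
    model.eval()
    with torch.no_grad():
        # one pass over every token at once
        full = model(input_ids).last_hidden_state
        # incremental pass: all but the last token first, caching key/value states
        prefix = model(input_ids[:, :-1], use_cache=True)
        # then only the last token, reusing the cache
        cached = model(input_ids[:, -1:], past_key_values=prefix.past_key_values).last_hidden_state
    return torch.allclose(full[:, -1], cached[:, 0], atol=atol)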
| 46
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , lowercase = 3 , lowercase = 3 , lowercase = ("DownEncoderBlock2D",) , lowercase = ("UpDecoderBlock2D",) , lowercase = (64,) , lowercase = 1 , lowercase = "silu" , lowercase = 3 , lowercase = 32 , lowercase = 256 , lowercase = 32 , lowercase = None , lowercase = 0.18_215 , lowercase = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase = Encoder(
in_channels=lowercase , out_channels=lowercase , down_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , double_z=lowercase , )
lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
        lowerCAmelCase = nn.Conv2d(lowercase , lowercase , 1 )
lowerCAmelCase = VectorQuantizer(lowercase , lowercase , beta=0.25 , remap=lowercase , sane_index_shape=lowercase )
        lowerCAmelCase = nn.Conv2d(lowercase , lowercase , 1 )
# pass init params to Decoder
lowerCAmelCase = Decoder(
in_channels=lowercase , out_channels=lowercase , up_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , norm_type=lowercase , )
@apply_forward_hook
def _snake_case ( self , lowercase , lowercase = True ) -> VQEncoderOutput:
lowerCAmelCase = self.encoder(lowercase )
lowerCAmelCase = self.quant_conv(lowercase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase )
@apply_forward_hook
def _snake_case ( self , lowercase , lowercase = False , lowercase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.quantize(lowercase )
else:
lowerCAmelCase = h
lowerCAmelCase = self.post_quant_conv(lowercase )
lowerCAmelCase = self.decoder(lowercase , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
def _snake_case ( self , lowercase , lowercase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase = sample
lowerCAmelCase = self.encode(lowercase ).latents
lowerCAmelCase = self.decode(lowercase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
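# --- Illustrative sketch, not part of the original file ---
# A minimal round trip through the `VQModel` defined above (encode, quantize
# against the learned codebook, decode), using diffusers' default (tiny)
# configuration. The shapes assume the single-block defaults:
import torch
from diffusers import VQModel

vq = VQModel()  # defaults: 3-channel images, one down/up block, no downsampling
image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = vq.encode(image).latents  # pre-quantization latents
    recon = vq.decode(latents).sample   # quantize, then decode
assert recon.shape == image.shape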
| 46
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
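# --- Illustrative sketch, not part of the original file ---
# This file and the pegasus_x one below use the same lazy-import pattern:
# `_import_structure` maps submodules to exported names, and `_LazyModule`
# defers the real imports until first attribute access. A stripped-down
# stand-in for that behaviour, using only the standard library:
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Resolve exported symbols from their submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # exported symbol -> submodule that defines it
        self._origin = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._origin:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._origin[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so each import runs only once
        return value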
| 360
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 57
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
def __init__( self ,snake_case ,snake_case=13 ,snake_case=7 ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=99 ,snake_case=16 ,snake_case=36 ,snake_case=6 ,snake_case=6 ,snake_case=6 ,snake_case=37 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=16 ,snake_case=2 ,snake_case=0.02 ,snake_case=3 ,snake_case=4 ,snake_case=None ,):
'''simple docstring'''
lowercase : Union[str, Any] = parent
lowercase : Dict = batch_size
lowercase : Optional[int] = seq_length
lowercase : Union[str, Any] = is_training
lowercase : Dict = use_input_mask
lowercase : Dict = use_token_type_ids
lowercase : str = use_labels
lowercase : Union[str, Any] = vocab_size
lowercase : int = embedding_size
lowercase : List[str] = hidden_size
lowercase : Dict = num_hidden_layers
lowercase : Optional[Any] = num_hidden_groups
lowercase : List[Any] = num_attention_heads
lowercase : Union[str, Any] = intermediate_size
lowercase : Any = hidden_act
lowercase : Tuple = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : Any = max_position_embeddings
lowercase : List[str] = type_vocab_size
lowercase : int = type_sequence_label_size
lowercase : Optional[Any] = initializer_range
lowercase : int = num_labels
lowercase : Optional[Any] = num_choices
lowercase : Union[str, Any] = scope
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase : Union[str, Any] = None
if self.use_input_mask:
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Optional[int] = None
if self.use_token_type_ids:
lowercase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowercase : Tuple = None
lowercase : Any = None
lowercase : Any = None
if self.use_labels:
lowercase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
lowercase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,num_hidden_groups=self.num_hidden_groups ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : int = AlbertModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : List[str] = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case )
lowercase : str = model(snake_case ,token_type_ids=snake_case )
lowercase : Optional[Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Tuple = AlbertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : Union[str, Any] = model(
snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case ,sentence_order_label=snake_case ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape ,(self.batch_size, config.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : int = AlbertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : int = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : str = AlbertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : List[str] = model(
snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,start_positions=snake_case ,end_positions=snake_case ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : str = self.num_labels
lowercase : Tuple = AlbertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase : Tuple = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Tuple = self.num_labels
lowercase : Optional[int] = AlbertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : int = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Any = self.num_choices
lowercase : Any = AlbertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : Tuple = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : Dict = model(
snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
lowercase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case=False ):
'''simple docstring'''
lowercase : List[str] = super()._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
lowercase : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=snake_case )
lowercase : str = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=snake_case )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = AlbertModelTester(self )
lowercase : Optional[int] = ConfigTester(self ,config_class=snake_case ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Optional[Any] = type
self.model_tester.create_and_check_model(*snake_case )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[Any] = AlbertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = AlbertModel.from_pretrained("""albert-base-v2""" )
lowercase : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : List[str] = model(snake_case ,attention_mask=snake_case )[0]
lowercase : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape ,snake_case )
lowercase : int = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,snake_case ,atol=1e-4 ) )
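# --- Illustrative sketch, not part of the original test file ---
# `ids_tensor` and `random_attention_mask` (imported above from
# test_modeling_common) are tiny helpers. Minimal stand-ins with the usual
# semantics -- random token ids, and a 0/1 mask with at least one attended
# position per row:
import torch


def ids_tensor_sketch(shape, vocab_size):
    """Random integer ids in [0, vocab_size)."""
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)


def random_attention_mask_sketch(shape):
    """Random binary mask; the last position is forced to be attended."""
    mask = ids_tensor_sketch(shape, vocab_size=2)
    mask[:, -1] = 1  # guarantee at least one attended token per row
    return mask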
| 20
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
torch.manual_seed(0 )
        a_ : Optional[int] = UNet3DConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
a_ : int = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
a_ : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
a_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a_ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a_ : Dict = self.get_dummy_components()
a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
a_ : Dict = 'np'
a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames
a_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
a_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
a_ : Optional[Any] = pipe.to('cuda' )
a_ : Any = 'Spiderman is surfing'
a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames
a_ : str = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
a_ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
a_ : Tuple = pipe.to('cuda' )
a_ : Any = 'Spiderman is surfing'
a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames
a_ : List[str] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
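# --- Illustrative sketch, not part of the original test file ---
# The slow tests above score generated videos by mean absolute difference
# against reference frames. The same check, isolated into a helper (the name
# and tolerance default are made up):
import numpy as np


def frames_close(expected: np.ndarray, actual: np.ndarray, tol: float = 5e-2) -> bool:
    """True when the mean absolute per-pixel difference is below `tol`."""
    assert expected.shape == actual.shape, "frame stacks must have matching shapes"
    return float(np.abs(expected.astype(np.float64) - actual.astype(np.float64)).mean()) < tol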
| 32
| 0
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( UpperCamelCase__ : List[Any], UpperCamelCase__ : int="shi-labs/oneformer_demo" ):
'''simple docstring'''
with open(hf_hub_download(UpperCamelCase__, UpperCamelCase__, repo_type='''dataset''' ), '''r''' ) as f:
UpperCamelCase__ = json.load(UpperCamelCase__ )
UpperCamelCase__ = {}
UpperCamelCase__ = []
UpperCamelCase__ = []
for key, info in class_info.items():
UpperCamelCase__ = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(UpperCamelCase__ ) )
UpperCamelCase__ = thing_ids
UpperCamelCase__ = class_names
return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
'''simple docstring'''
def __init__( self : int , _a : Any , _a : List[Any]=7 , _a : Optional[int]=3 , _a : List[Any]=30 , _a : Union[str, Any]=400 , _a : Optional[int]=None , _a : List[Any]=True , _a : int=True , _a : Tuple=[0.5, 0.5, 0.5] , _a : Any=[0.5, 0.5, 0.5] , _a : List[str]=10 , _a : List[str]=False , _a : Optional[Any]=255 , _a : Any="shi-labs/oneformer_demo" , _a : Optional[Any]="ade20k_panoptic.json" , _a : List[Any]=10 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = {'''shortest_edge''': 32, '''longest_edge''': 1_333} if size is None else size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean
UpperCamelCase__ = image_std
UpperCamelCase__ = class_info_file
UpperCamelCase__ = prepare_metadata(_a , _a )
UpperCamelCase__ = num_text
UpperCamelCase__ = repo_path
# for the post_process_functions
UpperCamelCase__ = 2
UpperCamelCase__ = 10
UpperCamelCase__ = 10
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = num_labels
UpperCamelCase__ = do_reduce_labels
UpperCamelCase__ = ignore_index
def A_ ( self : int ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A_ ( self : Any , _a : Tuple , _a : Union[str, Any]=False ):
if not batched:
UpperCamelCase__ = image_inputs[0]
if isinstance(_a , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ = image.size
else:
UpperCamelCase__ , UpperCamelCase__ = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ = self.size['''shortest_edge''']
UpperCamelCase__ = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ = self.size['''shortest_edge''']
UpperCamelCase__ = self.size['''shortest_edge''']
else:
UpperCamelCase__ = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ = max(_a , key=lambda _a : item[0] )[0]
UpperCamelCase__ = max(_a , key=lambda _a : item[1] )[1]
return expected_height, expected_width
def A_ ( self : List[str] ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_A : Optional[int] = image_processing_class
def A_ ( self : str ):
UpperCamelCase__ = OneFormerImageProcessorTester(self )
@property
def A_ ( self : Dict ):
return self.image_processing_tester.prepare_image_processor_dict()
def A_ ( self : Any ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''ignore_index''' ) )
self.assertTrue(hasattr(_a , '''class_info_file''' ) )
self.assertTrue(hasattr(_a , '''num_text''' ) )
self.assertTrue(hasattr(_a , '''repo_path''' ) )
self.assertTrue(hasattr(_a , '''metadata''' ) )
self.assertTrue(hasattr(_a , '''do_reduce_labels''' ) )
def A_ ( self : Optional[Any] ):
pass
def A_ ( self : Tuple ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
UpperCamelCase__ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ = self.image_processing_tester.get_expected_values(_a , batched=_a )
UpperCamelCase__ = image_processor(
_a , ['''semantic'''] * len(_a ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ = self.image_processing_tester.get_expected_values(_a , batched=_a )
UpperCamelCase__ = image_processor(
_a , ['''semantic'''] * len(_a ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[Any] ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ = self.image_processing_tester.get_expected_values(_a , batched=_a )
UpperCamelCase__ = image_processor(
_a , ['''semantic'''] * len(_a ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] , _a : int=False , _a : List[str]=False , _a : Tuple="np" ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCamelCase__ = self.image_processing_tester.num_labels
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a )
if with_segmentation_maps:
UpperCamelCase__ = num_labels
if is_instance_map:
UpperCamelCase__ = list(range(_a ) ) * 2
UpperCamelCase__ = dict(enumerate(_a ) )
UpperCamelCase__ = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCamelCase__ = [Image.fromarray(_a ) for annotation in annotations]
UpperCamelCase__ = image_processor(
_a , ['''semantic'''] * len(_a ) , _a , return_tensors='''pt''' , instance_id_to_semantic_id=_a , pad_and_return_pixel_mask=_a , )
return inputs
def A_ ( self : int ):
pass
def A_ ( self : Optional[Any] ):
def common(_a : int=False , _a : Optional[int]=None ):
UpperCamelCase__ = self.comm_get_image_processor_inputs(
with_segmentation_maps=_a , is_instance_map=_a , segmentation_type=_a )
UpperCamelCase__ = inputs['''mask_labels''']
UpperCamelCase__ = inputs['''class_labels''']
UpperCamelCase__ = inputs['''pixel_values''']
UpperCamelCase__ = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_a , _a , _a ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_a ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_a )
common(is_instance_map=_a , segmentation_type='''pil''' )
common(is_instance_map=_a , segmentation_type='''pil''' )
def A_ ( self : Any ):
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
def A_ ( self : int ):
UpperCamelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCamelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCamelCase__ = feature_extractor.post_process_semantic_segmentation(_a )
self.assertEqual(len(_a ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCamelCase__ = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCamelCase__ = feature_extractor.post_process_semantic_segmentation(_a , target_sizes=_a )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A_ ( self : List[str] ):
UpperCamelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCamelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCamelCase__ = image_processor.post_process_instance_segmentation(_a , threshold=0 )
self.assertTrue(len(_a ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _a )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A_ ( self : Dict ):
UpperCamelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCamelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCamelCase__ = image_processor.post_process_panoptic_segmentation(_a , threshold=0 )
self.assertTrue(len(_a ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _a )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
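# --- Illustrative sketch, not part of the original test file ---
# The RLE test above expects [start, length, start, length, ...] runs of 1s
# over the flattened mask, with 1-based starts. A compact implementation with
# those semantics (it mirrors, but is not, the transformers helper under test):
import numpy as np


def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    """Run-length encode a binary mask as [start_1based, length, ...]."""
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    # indices where the padded sequence changes value mark run boundaries
    boundaries = np.where(pixels[1:] != pixels[:-1])[0] + 1
    boundaries[1::2] -= boundaries[::2]  # convert run ends into run lengths
    return boundaries.tolist()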
| 363
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    '''simple docstring'''
def __init__( self : Dict , _a : Optional[str] = None ):
UpperCamelCase__ = (
os.path.join(_a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
UpperCamelCase__ = Extractor
def A_ ( self : str , _a : str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
UpperCamelCase__ = os.path.abspath(_a )
return os.path.join(self.extract_dir , hash_url_to_filename(_a ) )
def A_ ( self : Optional[Any] , _a : str , _a : bool ):
return force_extract or (
not os.path.isfile(_a ) and not (os.path.isdir(_a ) and os.listdir(_a ))
)
def A_ ( self : int , _a : str , _a : bool = False ):
UpperCamelCase__ = self.extractor.infer_extractor_format(_a )
if not extractor_format:
return input_path
UpperCamelCase__ = self._get_output_path(_a )
if self._do_extract(_a , _a ):
self.extractor.extract(_a , _a , _a )
return output_path
class BaseExtractor(ABC):
    '''simple docstring'''
@classmethod
@abstractmethod
def A_ ( cls : List[Any] , _a : Union[Path, str] , **_a : List[str] ):
...
@staticmethod
@abstractmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    '''simple docstring'''
    magic_numbers: List[bytes] = []
@staticmethod
def A_ ( _a : Union[Path, str] , _a : int ):
with open(_a , '''rb''' ) as f:
return f.read(_a )
@classmethod
def A_ ( cls : str , _a : Union[Path, str] , _a : bytes = b"" ):
if not magic_number:
UpperCamelCase__ = max(len(_a ) for cls_magic_number in cls.magic_numbers )
try:
UpperCamelCase__ = cls.read_magic_number(_a , _a )
except OSError:
return False
return any(magic_number.startswith(_a ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor):
    '''simple docstring'''
@classmethod
def A_ ( cls : Union[str, Any] , _a : Union[Path, str] , **_a : Any ):
return tarfile.is_tarfile(_a )
@staticmethod
def A_ ( _a : int , _a : List[str] ):
def resolved(_a : str ) -> str:
return os.path.realpath(os.path.abspath(_a ) )
def badpath(_a : str , _a : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_a , _a ) ).startswith(_a )
def badlink(_a : Tuple , _a : str ) -> bool:
# Links are interpreted relative to the directory containing the link
UpperCamelCase__ = resolved(os.path.join(_a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_a )
UpperCamelCase__ = resolved(_a )
for finfo in members:
if badpath(finfo.name , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(_a , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(_a , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
os.makedirs(_a , exist_ok=_a )
UpperCamelCase__ = tarfile.open(_a )
tar_file.extractall(_a , members=TarExtractor.safemembers(_a , _a ) )
tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b'''\x1F\x8B''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
with gzip.open(_a , '''rb''' ) as gzip_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class ZipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [
        b'''PK\x03\x04''',
        b'''PK\x05\x06''',  # empty archive
        b'''PK\x07\x08''',  # spanned archive
    ]
@classmethod
def A_ ( cls : Dict , _a : Union[Path, str] , _a : bytes = b"" ):
if super().is_extractable(_a , magic_number=_a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_a , '''rb''' ) as fp:
UpperCamelCase__ = _EndRecData(_a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
UpperCamelCase__ = fp.read(_a ) # CD is where we expect it to be
if len(_a ) == sizeCentralDir:
UpperCamelCase__ = struct.unpack(_a , _a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
os.makedirs(_a , exist_ok=_a )
with zipfile.ZipFile(_a , '''r''' ) as zip_file:
zip_file.extractall(_a )
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
with lzma.open(_a ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class RarExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00''']  # RAR_ID, RAR5_ID
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_a , exist_ok=_a )
UpperCamelCase__ = rarfile.RarFile(_a )
rf.extractall(_a )
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b'''\x28\xb5\x2F\xFD''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
UpperCamelCase__ = zstd.ZstdDecompressor()
with open(_a , '''rb''' ) as ifh, open(_a , '''wb''' ) as ofh:
dctx.copy_stream(_a , _a )
class Bzip2Extractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b'''\x42\x5A\x68''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
        with bz2.open(_a , '''rb''' ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class SevenZipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
        import py7zr
        os.makedirs(_a , exist_ok=_a )
        with py7zr.SevenZipFile(_a , '''r''' ) as archive:
archive.extractall(_a )
class Lz4Extractor(MagicNumberBaseExtractor):
    '''simple docstring'''
    magic_numbers = [b'''\x04\x22\x4D\x18''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(_a , '''rb''' ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class Extractor:
    '''simple docstring'''
    extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A_ ( cls : Dict ):
return max(
len(_a )
for extractor in cls.extractors.values()
if issubclass(_a , _a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A_ ( _a : Union[Path, str] , _a : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(_a , magic_number_length=_a )
except OSError:
return b""
@classmethod
def A_ ( cls : Optional[Any] , _a : Union[Path, str] , _a : bool = False ):
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_a , )
UpperCamelCase__ = cls.infer_extractor_format(_a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A_ ( cls : str , _a : Union[Path, str] ): # <Added version="2.4.0"/>
UpperCamelCase__ = cls._get_magic_number_max_length()
UpperCamelCase__ = cls._read_magic_number(_a , _a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_a , magic_number=_a ):
return extractor_format
@classmethod
def A_ ( cls : List[Any] , _a : Union[Path, str] , _a : Union[Path, str] , _a : Optional[str] = None , _a : Optional[BaseExtractor] = "deprecated" , ):
os.makedirs(os.path.dirname(_a ) , exist_ok=_a )
# Prevent parallel extractions
UpperCamelCase__ = str(Path(_a ).with_suffix('''.lock''' ) )
with FileLock(_a ):
shutil.rmtree(_a , ignore_errors=_a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_a , _a ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_a , )
UpperCamelCase__ = extractor if extractor != '''deprecated''' else extractor_format
else:
UpperCamelCase__ = cls.extractors[extractor_format]
return extractor.extract(_a , _a )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_a ):
return extractor.extract(_a , _a )
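# --- Illustrative sketch, not part of the original file ---
# Typical use of the `Extractor` registry defined above: sniff the archive
# format from its magic number, then extract. The helper name and paths are
# made up; `extractor_format` matches the keyword used upstream in `datasets`.
def extract_archive(archive_path: str, output_dir: str) -> str:
    """Infer the archive format, extract it, and return the output directory."""
    fmt = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip", "zip"
    if fmt is None:
        raise ValueError(f"unrecognised archive format: {archive_path}")
    Extractor.extract(archive_path, output_dir, extractor_format=fmt)
    return output_dir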
| 35
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''blip_2_vision_model'''
def __init__( self , lowerCAmelCase__=1_4_0_8 , lowerCAmelCase__=6_1_4_4 , lowerCAmelCase__=3_9 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=1_4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0_00_01 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , **lowerCAmelCase__ , ):
super().__init__(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = qkv_bias
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__):
cls._set_token_in_kwargs(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__)
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""") == "blip-2":
__SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__)
class Blip2QFormerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''blip_2_qformer'''
def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=2 , lowerCAmelCase__=1_4_0_8 , **lowerCAmelCase__ , ):
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = cross_attention_frequency
__SCREAMING_SNAKE_CASE = encoder_hidden_size
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__):
cls._set_token_in_kwargs(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__)
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""") == "blip-2":
__SCREAMING_SNAKE_CASE = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Optional[Any] = '''blip-2'''
__lowercase : Any = True
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=3_2 , **lowerCAmelCase__):
super().__init__(**lowerCAmelCase__)
if vision_config is None:
__SCREAMING_SNAKE_CASE = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""")
if qformer_config is None:
__SCREAMING_SNAKE_CASE = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""")
if text_config is None:
__SCREAMING_SNAKE_CASE = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""")
__SCREAMING_SNAKE_CASE = BlipaVisionConfig(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = BlipaQFormerConfig(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.text_config.tie_word_embeddings
__SCREAMING_SNAKE_CASE = self.text_config.is_encoder_decoder
__SCREAMING_SNAKE_CASE = num_query_tokens
__SCREAMING_SNAKE_CASE = self.vision_config.hidden_size
__SCREAMING_SNAKE_CASE = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__SCREAMING_SNAKE_CASE = 1.0
__SCREAMING_SNAKE_CASE = 0.02
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
__SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
__SCREAMING_SNAKE_CASE = self.qformer_config.to_dict()
__SCREAMING_SNAKE_CASE = self.text_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
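
# Hedged usage sketch (not part of the original file): composing a full config
# from freshly initialized sub-configs; "opt" is the default text backbone
# assumed by Blip2Config.
#
#     vision_config = Blip2VisionConfig()
#     qformer_config = Blip2QFormerConfig()
#     text_config = CONFIG_MAPPING["opt"]()
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config, qformer_config, text_config
#     )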
| 100
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """
    >>> fraction_list(2)
    ['16/64', '19/95', '26/65', '49/98']
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    """Denominator of the product of the digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
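
# Worked check: for digit_len=2 the non-trivial digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution() returns
# 100 (the Project Euler 33 answer).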
if __name__ == "__main__":
print(solution())
| 100
| 1
|
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
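
# Note: the three suites above exercise the same UNet2DModel class under
# different configurations -- a small DDPM-style net, the LDM checkpoints, and
# the NCSN++ variant, which swaps positional time embeddings for Fourier
# features and Skip up/down blocks (hence its separate expected-output slices).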
| 364
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
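
# Hedged usage sketch: the factory yields either a plain eager wrapper or a
# graph-mode (optionally XLA-compiled) tf.function, so a benchmark body is
# written once and decorated, e.g.:
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids, training=False)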
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 265
| 0
|
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data onto the closed interval [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit (sample) standard deviation
    return [round((x - mu) / (sigma), ndigits) for x in data]
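

if __name__ == "__main__":
    # Illustrative check (not in the original source): normalization maps the
    # sample onto [0, 1]; standardization gives zero mean and unit sample stdev.
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # [-1.162, -0.387, 0.387, 1.162]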
| 133
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
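
# Minimal usage sketch (hedged; the embedding dimension and tensor shapes are
# illustrative only):
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#     scaled = normalizer.scale(image_embeds)   # whiten CLIP image embeddings
#     restored = normalizer.unscale(scaled)     # exact inverse of scale()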
| 47
| 0
|
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

""",
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=False)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
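
# Hedged usage sketch -- the checkpoint name is illustrative; any masked-LM
# checkpoint works:
#
#     from transformers import pipeline
#     fill = pipeline("fill-mask", model="bert-base-uncased")
#     fill("Paris is the [MASK] of France.", top_k=3)
#     fill("Paris is the [MASK] of France.", targets=["capital", "heart"])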
| 338
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
xmod.eval() # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

    # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
a : List[str] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 338
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
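
# Hedged usage sketch: inside an Accelerate test script one would write
#
#     accelerator = Accelerator()
#     train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
#
# so that TPU runs get fixed-length padding while other backends pad each
# batch to its longest sequence.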
| 97
|
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
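
# Note (hedged): lbp.local_binary_value compares a pixel against its 8
# neighbours and packs the comparison bits into one byte, so lbp_image holds
# integer values in [0, 255].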
| 57
| 0
|
"""simple docstring"""
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion over every cell; exponential without memoization."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a full (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same DP keeping only the current and next rows."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, don't alias, or the previous row gets corrupted

    return largest_square_area
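
# Complexity note: the plain recursion is exponential in rows * cols; the
# memoized and bottom-up variants run in O(rows * cols) time, and the final
# version keeps only two rows for O(cols) extra space.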
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 289
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCamelCase__ :
"""simple docstring"""
pass
| 289
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
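
# Shape note: fairseq stores the fused attention projection as one
# (3 * hidden_size, hidden_size) matrix; the slices above peel off the
# q_proj, k_proj and v_proj weights in that order.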
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint into the Transformers format."""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""")

    if len(unexpected_keys) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
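# Example invocation (hypothetical script name and output path), matching the
# CLI defined below:
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small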
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 77
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
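# Quick numeric check (illustrative): softmax(np.array([[1.0, 2.0, 3.0]])) is
# approximately [[0.090, 0.245, 0.665]], and each row sums to 1.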
class PairClassificationPipeline(Pipeline):
    """Custom pipeline that classifies a pair of texts with a sequence-classification model."""

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35
| 0
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generations in the batch contain a stop string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task and gather them across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"""Results: {pass_at_k}""")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 366
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature for the given word."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]
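# Illustrative behaviour (exact lists depend on the bundled words.txt):
#   signature("pots") == "opst"
#   anagram("pots") might return ["opts", "post", "pots", "spot", "stop", "tops"]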
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 107
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
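        # Note (explanatory, not from the original test): `loss` is the
        # cross-entropy averaged over the label tokens, so multiplying by
        # labels.shape[-1] and negating recovers the summed log-likelihood
        # log p(labels | input), which is what EXPECTED_SCORE encodes.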
| 133
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f'''{text_of_1_token} {text_of_1_token}'''

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f''' {text}'''

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 265
| 0
|
def matching_min_vertex_cover(graph: dict) -> set:
    """APX algorithm for min vertex cover: repeatedly pick both endpoints of an arbitrary edge."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
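# Worked example (illustrative; the exact cover depends on set pop() order):
#   graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
#   get_edges(graph) yields directed pairs such as (0, 1), (0, 3), (4, 2), ...
#   matching_min_vertex_cover(graph) then returns a cover like {0, 1, 2, 4}.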
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 352
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Returns the next number of the chain of squared digit sums."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
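# Worked example: next_number(35) == 3**2 + 5**2 == 34, and the chain
# 35 -> 34 -> 25 -> 29 -> 85 -> 89 -> ... eventually cycles at 89.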
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000  # CHAINS[n - 1] caches whether n's chain ends at 1 (True) or 89 (False)
CHAINS[0] = True  # the chain starting at 1 stays at 1
CHAINS[57] = False  # the chain starting at 58 reaches 89
def chain(number: int) -> bool:
    """Returns True if the chain of `number` ends at 1, and False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10000000) -> int:
    """Returns how many integers below `number` have a squared-digit chain that arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 93
| 0
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, usable with any masked-LM model."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
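# Minimal usage sketch (hypothetical checkpoint; any masked-LM model works):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=2)
#   # -> two dicts, each with "sequence", "score", "token" and "token_str" keys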
| 338
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
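    # Spot check of the algebraic sum identity above (illustrative): for
    # membership values in [0, 1], µA + µB - µA*µB stays in [0, 1];
    # e.g. 0.6 + 0.5 - 0.6 * 0.5 == 0.8.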
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
    plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 338
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"""{token} {vocab_tokens[token]}\n""")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 350
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 195
| 0
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f'''{attribute} is initialized.''')
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f'''{attribute} is initialized''')
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f'''{old_model} does not have {old_attribute}''')
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f'''{key} was not correctly initialized!''')

    print(f'''Saving model to {pytorch_dump_folder_path}''')
    prophet.save_pretrained(pytorch_dump_folder_path)
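# Illustration of how the mapping above is applied (hypothetical missing key):
# "prophetnet.decoder.layers.0.self_attn.key_proj.weight" is walked attribute by
# attribute; "self_attn" resolves to "ngram_self_attn" on the old model, and
# "key_proj" triggers the fused in_proj weight split handled in the loop above.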
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 289
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase__ = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 289
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370
|
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    """Count composites below max_number with exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
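# Sanity check (my addition, not part of the original solution): below 30 the
# numbers with exactly two prime factors are 4, 6, 9, 10, 14, 15, 21, 22, 25
# and 26, i.e. ten of them.
def _semiprime_example() -> None:
    assert solution(30) == 10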
if __name__ == "__main__":
print(f"""{solution() = }""")
| 327
| 0
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
return base * power(_UpperCamelCase , (exponent - 1) ) if exponent else 1
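# Small self-check (my addition, not in the original script): the recursion
# bottoms out at exponent == 0, so power(2, 5) == 2 * 2 * 2 * 2 * 2 * 1.
def _power_example() -> None:
    assert power(2, 5) == 32
    assert power(7, 0) == 1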
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
A : List[Any] = int(input("Enter the base: ").strip())
A : str = int(input("Enter the exponent: ").strip())
A : List[str] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
A : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 57
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
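# Usage sketch (my addition, not part of the formatter itself): this class is
# normally selected indirectly through the public `with_format` API rather
# than instantiated by hand; the dataset below is only illustrative.
#
# ds = ds.with_format("torch")
# ds[0]["input_ids"]  # -> torch.Tensor instead of a Python list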
| 107
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 111
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 111
| 1
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNetaDConditionModel, mel: Mel, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self , _a = 1 , _a = None , _a = None , _a = 0 , _a = 0 , _a = None , _a = None , _a = 0 , _a = 0 , _a = None , _a = 0 , _a = None , _a = None , _a=True , ) -> str:
_A : str = steps or self.get_default_steps()
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
_A : List[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_A : str = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_A : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__SCREAMING_SNAKE_CASE , device=self.device , )
_A : str = noise
_A : Any = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_A : int = self.mel.audio_slice_to_image(__SCREAMING_SNAKE_CASE )
_A : int = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
_A : Dict = (input_image / 255) * 2 - 1
_A : Optional[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_A : Optional[int] = self.vqvae.encode(torch.unsqueeze(__SCREAMING_SNAKE_CASE , 0 ) ).latent_dist.sample(
generator=__SCREAMING_SNAKE_CASE )[0]
_A : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_A : str = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler.timesteps[start_step - 1] )
_A : str = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_A : Union[str, Any] = int(mask_start_secs * pixels_per_second )
_A : Union[str, Any] = int(mask_end_secs * pixels_per_second )
_A : List[Any] = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __SCREAMING_SNAKE_CASE ):
_A : Any = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )['''sample''']
else:
_A : int = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )['''sample''']
if isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ):
_A : str = self.scheduler.step(
model_output=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , sample=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )['''prev_sample''']
else:
_A : Tuple = self.scheduler.step(
model_output=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , sample=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_A : Dict = mask[:, step, :, :mask_start]
if mask_end > 0:
_A : Tuple = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_A : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_A : Union[str, Any] = self.vqvae.decode(__SCREAMING_SNAKE_CASE )['''sample''']
_A : str = (images / 2 + 0.5).clamp(0 , 1 )
_A : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_A : Any = (images * 255).round().astype("""uint8""" )
_A : Union[str, Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__SCREAMING_SNAKE_CASE , mode="""RGB""" ).convert("""L""" ) for _ in images) )
_A : Any = [self.mel.image_to_audio(__SCREAMING_SNAKE_CASE ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__SCREAMING_SNAKE_CASE )[:, np.newaxis, :] ) , **ImagePipelineOutput(__SCREAMING_SNAKE_CASE ) )
@torch.no_grad()
    def encode(self, images, steps: int = 50):
        """Run the denoising loop in reverse to map images back to noise (DDIM only)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample
@staticmethod
    def slerp(xa: torch.Tensor, xa2: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two (flattened) tensors."""
        theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xa2)) / torch.norm(xa) / torch.norm(xa2))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xa2 / sin(theta)
| 26
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 93
| 0
|
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three fixed priority levels; lower number = higher priority."""

    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Queue in which the smallest element has the highest priority."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
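# Worked example (my addition): with a FixedPriorityQueue, dequeue() always
# drains priority 0 before touching priority 1 or 2, regardless of insertion
# order.
def _priority_queue_example() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(2, 1)
    fpq.enqueue(0, 42)
    assert fpq.dequeue() == 42  # priority 0 wins
    assert fpq.dequeue() == 1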
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)

    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)

    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 92
|
"""
Sums Euler's totient phi(n) for 2 <= n <= limit, which counts the reduced
proper fractions with denominator at most `limit`.
"""


def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
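# Sanity check (my addition): for limit = 8 the relevant totients are
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sum to 21.
def _totient_example() -> None:
    assert solution(8) == 21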
if __name__ == "__main__":
print(F"{solution() = }")
| 92
| 1
|
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
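# Tiny worked example (my addition; the edge weights are chosen purely for
# illustration): 0 --(2)--> 1 --(3)--> 2 yields distances [0, 2, 5] from 0.
def _bellman_ford_example() -> list[float]:
    graph = [
        {"src": 0, "dst": 1, "weight": 2},
        {"src": 1, "dst": 2, "weight": 3},
    ]
    return bellman_ford(graph, vertex_count=3, edge_count=2, src=0)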
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 195
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
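# Worked example (my addition): a 50x20 px box with top-left corner (10, 10)
# on a 200x100 px page maps onto the 0-1000 coordinate grid as follows.
def _normalize_box_example() -> None:
    assert normalize_box([10, 10, 60, 30], 200, 100) == [50, 100, 300, 300]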
def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Dict = ["""pixel_values"""]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BILINEAR , snake_case = True , snake_case = 1 / 255 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , snake_case = None , snake_case = "" , **snake_case , ):
super().__init__(**snake_case )
lowercase = size if size is not None else {'height': 224, 'width': 224}
lowercase = get_size_dict(snake_case )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_rescale
lowercase = rescale_value
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase = apply_ocr
lowercase = ocr_lang
lowercase = tesseract_config
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = PILImageResampling.BILINEAR , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase = (size['height'], size['width'])
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case=None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(snake_case )
lowercase = resample if resample is not None else self.resample
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(snake_case ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
lowercase = []
lowercase = []
for image in images:
lowercase , lowercase = apply_tesseract(snake_case , snake_case , snake_case )
words_batch.append(snake_case )
boxes_batch.append(snake_case )
if do_resize:
lowercase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
lowercase = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=snake_case )
if apply_ocr:
lowercase = words_batch
lowercase = boxes_batch
return data
| 195
| 1
|
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
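# Quick self-check (my addition): only single-argument RuntimeErrors whose
# message matches one of the OOM patterns above trigger a reduction.
def _should_reduce_batch_size_example() -> None:
    assert should_reduce_batch_size(RuntimeError("CUDA out of memory."))
    assert not should_reduce_batch_size(ValueError("CUDA out of memory."))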
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
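# Usage sketch (my addition; `run` below is a stand-in, not a real training
# loop): the decorator injects `batch_size` as the first argument and halves
# it whenever a recognized out-of-memory error escapes the wrapped call.
def _find_executable_batch_size_demo() -> int:
    @find_executable_batch_size(starting_batch_size=8)
    def run(batch_size):
        if batch_size > 2:
            raise RuntimeError("CUDA out of memory.")  # simulated OOM
        return batch_size

    return run()  # 8 -> 4 -> 2, then succeeds and returns 2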
| 371
|
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
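# Usage sketch (my addition; the 2x2 system is illustrative): solving
# 4x + y = 1, x + 3y = 2 converges towards x = 1/11 and y = 7/11.
def _jacobi_example() -> list[float]:
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    return jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 25)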
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 140
| 0
|
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) for x, y in zip(lowerCAmelCase_ , processed_features[input_name] ) ) )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
_snake_case = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
_snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
_snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
_snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def lowerCamelCase ( self , lowerCAmelCase_=False ):
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase_ ):
_snake_case = len(input[0] )
for input_slice in input[1:]:
if len(lowerCAmelCase_ ) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ):
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if not np.allclose(np.asarray(lowerCAmelCase_ ) , np.asarray(lowerCAmelCase_ ) , atol=1E-3 ):
return False
return True
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase_ )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = self.feat_extract_tester.seq_length_diff
_snake_case = self.feat_extract_tester.max_seq_length + pad_diff
_snake_case = self.feat_extract_tester.min_seq_length
_snake_case = self.feat_extract_tester.batch_size
_snake_case = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='np' )
_snake_case = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding='max_length' )[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=lowerCAmelCase_ , return_tensors='np' )
_snake_case = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_snake_case = feat_extract.pad(lowerCAmelCase_ , pad_to_multiple_of=10 )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , pad_to_multiple_of=10 )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase_ )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase_ , return_tensors='np' , )
_snake_case = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_snake_case = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def lowerCamelCase ( self , lowerCAmelCase_=False ):
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase_ ):
_snake_case = len(input[0] )
for input_slice in input[1:]:
if len(lowerCAmelCase_ ) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ):
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if not np.allclose(np.asarray(lowerCAmelCase_ ) , np.asarray(lowerCAmelCase_ ) , atol=1E-3 ):
return False
return True
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase_ )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=lowerCAmelCase_ )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) )
_snake_case = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
# truncate to smallest with np
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=lowerCAmelCase_ , )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
_snake_case = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than the longest input,
        # the function can't return an `np.ndarray`; it has to return a list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
# truncate to middle
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=lowerCAmelCase_ , return_tensors='np' , )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=lowerCAmelCase_ )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
_snake_case = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
        # since truncation forces padding to be smaller than the longest input,
        # the function can't return an `np.ndarray`; it has to return a list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , truncation=lowerCAmelCase_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding='longest' , truncation=lowerCAmelCase_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding='longest' , truncation=lowerCAmelCase_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding='max_length' , truncation=lowerCAmelCase_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_snake_case = 12
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCAmelCase_ , truncation=lowerCAmelCase_ , )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCAmelCase_ , )
_snake_case = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_snake_case = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_snake_case = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
def lowerCamelCase ( self ):
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase_ )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='np' )[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='np' )[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_dict
_snake_case = True
_snake_case = self.feature_extraction_class(**lowerCAmelCase_ )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = [len(lowerCAmelCase_ ) for x in speech_inputs]
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_dict
_snake_case = True
_snake_case = self.feature_extraction_class(**lowerCAmelCase_ )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = [len(lowerCAmelCase_ ) for x in speech_inputs]
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = min(lowerCAmelCase_ )
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='np' )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 42
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
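# Usage sketch: with the lazy module registered above, `import
# transformers.models.autoformer` stays cheap, and the torch-backed
# `modeling_autoformer` submodule is only imported when an attribute such as
# `AutoformerModel` is first accessed (assuming torch is installed).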
| 327
| 0
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
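# The returned metadata maps each class id to its label name and additionally
# stores "thing_ids" (ids of countable "thing" classes, as opposed to amorphous
# "stuff") and "class_names"; the OneFormer image processor expects this layout.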
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
        size=None, do_resize=True, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
        num_labels=10, do_reduce_labels=False, ignore_index=255,
        repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    image_processing_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
return self.image_processing_tester.prepare_image_processor_dict()
def A_ ( self : List[Any] ):
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , '''image_mean''' ) )
self.assertTrue(hasattr(lowercase_ , '''image_std''' ) )
self.assertTrue(hasattr(lowercase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowercase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowercase_ , '''size''' ) )
self.assertTrue(hasattr(lowercase_ , '''ignore_index''' ) )
self.assertTrue(hasattr(lowercase_ , '''class_info_file''' ) )
self.assertTrue(hasattr(lowercase_ , '''num_text''' ) )
self.assertTrue(hasattr(lowercase_ , '''repo_path''' ) )
self.assertTrue(hasattr(lowercase_ , '''metadata''' ) )
self.assertTrue(hasattr(lowercase_ , '''do_reduce_labels''' ) )
def A_ ( self : Dict ):
pass
def A_ ( self : str ):
# Initialize image_processor
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
snake_case_ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
snake_case_ ,snake_case_ = self.image_processing_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ ,snake_case_ = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_ )
snake_case_ = image_processor(
lowercase_ , ['''semantic'''] * len(lowercase_ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : List[str] ):
# Initialize image_processor
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
snake_case_ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
snake_case_ ,snake_case_ = self.image_processing_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ ,snake_case_ = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_ )
snake_case_ = image_processor(
lowercase_ , ['''semantic'''] * len(lowercase_ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : List[str] ):
# Initialize image_processor
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
snake_case_ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
snake_case_ ,snake_case_ = self.image_processing_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ ,snake_case_ = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_ )
snake_case_ = image_processor(
lowercase_ , ['''semantic'''] * len(lowercase_ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )
        return inputs
def A_ ( self : Optional[int] ):
pass
def A_ ( self : Dict ):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
def A_ ( self : Any ):
        # mask slice assignments reconstructed to satisfy the RLE assertions below
        mask = np.zeros((20, 50))
        mask[0, 20:] = 1
        mask[1, :15] = 1
        mask[5, :10] = 1

        rle = binary_mask_to_rle(mask)
        self.assertEqual(len(rle), 4)
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
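        # The RLE layout is [start_1, length_1, start_2, length_2, ...] with
        # 1-indexed starts over the row-major flattened mask: the ones at flat
        # indices 20..64 merge into one run (start 21, length 45), and the run in
        # row 5 supplies the remaining start/length pair.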
def A_ ( self : Union[str, Any] ):
snake_case_ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
snake_case_ = self.image_processing_tester.get_fake_oneformer_outputs()
        snake_case_ = image_processor.post_process_semantic_segmentation(lowercase_ )
self.assertEqual(len(lowercase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
snake_case_ = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        snake_case_ = image_processor.post_process_semantic_segmentation(lowercase_ , target_sizes=lowercase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
snake_case_ = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case_ = image_processor.post_process_instance_segmentation(lowercase_ , threshold=0 )
self.assertTrue(len(lowercase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A_ ( self : Dict ):
snake_case_ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
snake_case_ = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case_ = image_processor.post_process_panoptic_segmentation(lowercase_ , threshold=0 )
self.assertTrue(len(lowercase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 72
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
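# In formula form, with T = num_diffusion_timesteps and the cosine transform
#   alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2,
# each beta is the one-step decay of alpha_bar, clipped at max_beta:
#   beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)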
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get the
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance)
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
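    # Note: before the "fixed_small_log"/"learned_range" adjustments, the value
    # computed in _get_variance is the DDPM posterior variance of eq. (7):
    #   sigma_t^2 = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t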
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                # _get_variance already returned the standard deviation in this case
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
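    # add_noise implements the closed-form forward process q(x_t | x_0):
    #   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
    # the two while-loops merely reshape the per-timestep scalars so they
    # broadcast against the sample tensor.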
| 72
| 1
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of elements of `nums` such that no two chosen
    elements are adjacent.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
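# Worked example: for [3, 2, 7, 10] the best non-adjacent picks are 3 and 10,
# so maximum_non_adjacent_sum([3, 2, 7, 10]) == 13 (taking 2 + 10 or 3 + 7
# yields only 12 or 10).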
| 111
|
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # all remaining primes are of the form 6k - 1 or 6k + 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
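# Why the 6k +/- 1 wheel works: every integer is one of 6k, 6k+1, ..., 6k+5;
# 6k, 6k+2 and 6k+4 are even and 6k+3 is divisible by 3, so after the early
# checks only candidates congruent to 1 or 5 (mod 6) remain. For example, 25 is
# rejected at i = 5, while 29 survives the loop and is reported prime.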
| 111
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"
    def __init__(
        self,
        vocab_size=30522, hidden_size=768, retriever_proj_size=128,
        num_hidden_layers=12, num_attention_heads=12, num_candidates=8,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3,
        reader_beam_size=5, reader_seq_len=320, num_block_records=13353718,
        searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 350
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle's method: factor a square matrix into a unit-lower-triangular L
    # and an upper-triangular U without pivoting.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
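# Example: for a = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
# every leading principal minor is nonzero, so no pivoting is needed and
# lower_upper_decomposition(a) returns L (unit diagonal) and U satisfying
# np.allclose(L @ U, a).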
| 305
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
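    # `attribute_map` lets generic code read `config.hidden_size` and
    # `config.num_attention_heads` even though Marian stores these values under
    # `d_model` and `encoder_attention_heads`.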
    def __init__(
        self,
        vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100,
        eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__lowerCAmelCase = {0: "batch"}
__lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
__lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_A , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
for i in range(_A ):
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = super().outputs
else:
__lowerCAmelCase = super(_A , self ).outputs
if self.use_past:
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
for i in range(_A ):
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
"""simple docstring"""
__lowerCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
_A , _A , _A , _A , _A )
# Generate decoder inputs
__lowerCAmelCase = seq_length if not self.use_past else 1
__lowerCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
_A , _A , _A , _A , _A )
__lowerCAmelCase = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__lowerCAmelCase = dict(**_A , **_A )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["input_ids"].shape
__lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
__lowerCAmelCase , __lowerCAmelCase = self.num_attention_heads
__lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCAmelCase = decoder_seq_length + 3
__lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_A , _A )] , dim=1 )
__lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
__lowerCAmelCase = min(_A , _A )
__lowerCAmelCase = max(_A , _A ) - min_num_layers
__lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
__lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_A , _A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
"""simple docstring"""
__lowerCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
_A , _A , _A , _A , _A )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
__lowerCAmelCase , __lowerCAmelCase = self.num_attention_heads
__lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCAmelCase = common_inputs["attention_mask"].dtype
__lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_A , _A , dtype=_A )] , dim=1 )
__lowerCAmelCase = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
"""simple docstring"""
__lowerCAmelCase = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCAmelCase = tokenizer.num_special_tokens_to_add(_A )
__lowerCAmelCase = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
__lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCAmelCase = dict(tokenizer(_A , return_tensors=_A ) )
return common_inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
else:
__lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
return common_inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = super()._flatten_past_key_values_(_A , _A , _A , _A )
else:
__lowerCAmelCase = super(_A , self )._flatten_past_key_values_(
_A , _A , _A , _A )
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 1E-4
| 92
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    # broadcast a scalar into a (value, value) pair; iterables pass through
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class a__ :
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__lowerCAmelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__lowerCAmelCase = after_output[0].numpy()
__lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase = to_atuple(vision_model.config.image_size )
__lowerCAmelCase = to_atuple(vision_model.config.patch_size )
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_pretrained_model_and_inputs()
__lowerCAmelCase = model_a(**_A )
__lowerCAmelCase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_A )
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A )
__lowerCAmelCase = model_a(**_A )
__lowerCAmelCase = after_outputs[0].numpy()
__lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFViTModel(_A , name="vision_model" )
__lowerCAmelCase = TFBertModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFViTModelTester(self )
__lowerCAmelCase = TFBertModelTester(self )
__lowerCAmelCase = vit_model_tester.prepare_config_and_inputs()
__lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowerCAmelCase = to_atuple(vision_model.config.image_size )
__lowerCAmelCase = to_atuple(vision_model.config.patch_size )
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModel(_A , name="vision_model" )
__lowerCAmelCase = TFRobertaModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModelTester(self )
__lowerCAmelCase = TFRobertaModelTester(self )
__lowerCAmelCase = vit_model_tester.prepare_config_and_inputs()
__lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFCLIPVisionModel(_A , name="vision_model" )
__lowerCAmelCase = TFBertModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFCLIPVisionModelTester(self )
__lowerCAmelCase = TFBertModelTester(self )
__lowerCAmelCase = clip_model_tester.prepare_config_and_inputs()
__lowerCAmelCase = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_A )
__lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__lowerCAmelCase = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_A , padding=_A , return_tensors="np" )
__lowerCAmelCase = model(**_A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCAmelCase = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
| 92
| 1
|
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
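# Illustrative behaviour with hypothetical data (not the real 50-digit file):
# for a toy num.txt containing the two lines "512" and "488", solution()
# returns str(512 + 488)[:10] == "1000".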
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self) -> None:
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
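# A note on the score above: `loss` is the mean per-token cross-entropy, so
# multiplying by the number of label tokens and negating recovers the summed
# log-likelihood of the label sequence. A minimal illustration with made-up
# numbers (not taken from the test):
#
#     mean_loss, num_tokens = 2.5, 4
#     sequence_log_prob = -(num_tokens * mean_loss)  # -10.0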
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """
    This class represents a vector of arbitrary size.
    """
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given size
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index `pos` (indexing from 0)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """
    This class represents an arbitrary matrix.
    """
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:  # error case
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
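# Minimal illustrative usage of the classes above (hypothetical values):
#
#     v = Vector([1, 2, 3])
#     w = Vector([4, 5, 6])
#     print(v + w)            # (5,7,9)
#     print(v * w)            # 32, the dot product
#     m = Matrix([[1, 0], [0, 1]], 2, 2)
#     print(m.determinant())  # 1 for the identity matrix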
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
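# Minimal illustrative usage of the config class above (the asserted values
# are just its declared defaults):
#
#     config = XLMRobertaXLConfig()
#     assert config.hidden_size == 2560 and config.num_hidden_layers == 36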
|
"""simple docstring"""
def count_inversions_bf(arr) -> int:
    """Count inversions with a brute-force O(n^2) double loop."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Count inversions in O(n log n) with a merge-sort style divide and conquer."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted lists while counting inversions that cross the halves."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion
def main() -> None:
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
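# Quick illustrative sanity check (hypothetical input): both counters agree,
# e.g. [3, 1, 2] has exactly two inversions, (3, 1) and (3, 2):
#
#     assert count_inversions_bf([3, 1, 2]) == 2
#     assert count_inversions_recursive([3, 1, 2])[1] == 2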
|
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(
                latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t
            ).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` so that, per column, only the most likely classes whose cumulative
        probability stays below `truncation_rate` keep a finite log-probability.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
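# A self-contained sketch of the same truncation trick on a tiny
# (batch=1, classes=4, pixels=1) tensor; the values are illustrative only:
#
#     log_p = torch.log(torch.tensor([[[0.5], [0.3], [0.15], [0.05]]]))
#     sorted_log_p, indices = torch.sort(log_p, 1, descending=True)
#     keep = torch.exp(sorted_log_p).cumsum(dim=1) < 0.9  # truncation_rate
#     keep = torch.cat((torch.full_like(keep[:, 0:1, :], True), keep), dim=1)[:, :-1, :]
#     keep = keep.gather(1, indices.argsort(1))
#     # only the classes covering ~90% of the mass keep finite log-probs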
|
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string as a list of 1-26 alphabet positions."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of 1-26 alphabet positions back into a string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
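# Illustrative round trip through the helpers above (matching the 1-26 mapping):
#
#     >>> encode("marvin")
#     [13, 1, 18, 22, 9, 14]
#     >>> decode([13, 1, 18, 22, 9, 14])
#     'marvin'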
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "

        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "

        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "

        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "

        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "

        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "

        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
|
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , _lowerCamelCase = "" , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase):
super().__init__(self , **_lowerCamelCase)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCAmelCase__ : Optional[Any] = fsspec.open(
_lowerCamelCase , mode="""rb""" , protocol=_lowerCamelCase , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCAmelCase__ : List[Any] = os.path.basename(self.file.path.split("""::""")[0])
UpperCAmelCase__ : Dict = (
self.compressed_name[: self.compressed_name.rindex(""".""")]
if """.""" in self.compressed_name
else self.compressed_name
)
UpperCAmelCase__ : Tuple = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__( self , _lowerCamelCase , _lowerCamelCase = "rb" , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = DEFAULT_BLOCK_SIZE , **_lowerCamelCase , ):
super().__init__(
fo=_lowerCamelCase , mode=_lowerCamelCase , target_protocol=_lowerCamelCase , target_options=_lowerCamelCase , block_size=_lowerCamelCase , **_lowerCamelCase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCAmelCase__ : Dict = self.file.__enter__
class _snake_case :
def __init__( self , _lowerCamelCase):
UpperCAmelCase__ : Optional[int] = file_
def __enter__( self):
self._file.__enter__()
return self
def __exit__( self , *_lowerCamelCase , **_lowerCamelCase):
self._file.__exit__(*_lowerCamelCase , **_lowerCamelCase)
def __iter__( self):
return iter(self._file)
def snake_case__ ( self):
return next(self._file)
def __getattr__( self , _lowerCamelCase):
return getattr(self._file , _lowerCamelCase)
def fixed_enter(*_lowerCamelCase , **_lowerCamelCase):
return WrappedFile(_enter(*_lowerCamelCase , **_lowerCamelCase))
UpperCAmelCase__ : List[Any] = fixed_enter
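# A minimal sketch of how such a filesystem is typically reached through
# fsspec's chained-URL syntax (the archive name "data.txt.gz" is hypothetical):
#
#     import fsspec
#
#     with fsspec.open("gzip://data.txt::file://data.txt.gz", "rb") as f:
#         content = f.read()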
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
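# Truth table realised by or_gate (output is 1 unless both inputs are 0):
#     0 OR 0 -> 0,  0 OR 1 -> 1,  1 OR 0 -> 1,  1 OR 1 -> 1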
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: ordinary least squares on (date, match) features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: seasonal ARIMA with the match count as exogenous input."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regression with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return the lower limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: compare each forecast against the actual result."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    # start normalization
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
|
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message using the generated key."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text back into the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
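# Worked single step (illustrative): with message letter "T" (index 19) and
# key letter "S" (index 18), cipher_text computes (19 - 18) % 26 = 1, which
# dict2 maps back to "B".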
|
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # s2 = sin(2 * theta), c2 = cos(2 * theta), where theta is the angle of the normal
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
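# Derivation note for next_point: if the normal at the point of incidence has
# gradient n = tan(theta), the double-angle identities give
#     sin(2*theta) = 2n / (1 + n^2),   cos(2*theta) = (1 - n^2) / (1 + n^2),
# and reflecting an incoming line of gradient m across that normal yields
#     m_out = (sin(2t) - cos(2t) * m) / (cos(2t) + sin(2t) * m),
# which is exactly the outgoing_gradient computed above.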
|
'''simple docstring'''
def bfs(graph, s, t, parent) -> bool:
    # Return True if the sink is reachable from the source.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
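# For this classic CLRS example network, the breadth-first search above
# (i.e. Edmonds-Karp) finds a maximum flow of 23 from node 0 to node 5.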
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]  # fmt: skip

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
|
def snake_case (UpperCAmelCase__ ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
UpperCamelCase_: Optional[int] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase_: List[str] = 1
if upper_limit > 0:
UpperCamelCase_: Dict = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(UpperCAmelCase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
        N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
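# A quick closed-form cross-check (illustrative): the n-th Catalan number also
# equals C(2n, n) // (n + 1), so the DP above can be verified with math.comb:
#
#     import math
#
#     assert catalan_numbers(5)[-1] == math.comb(10, 5) // 6  # both are 42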
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
|
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with 0x80 then zeros so the total length is a multiple of 64
        # bytes, and append the original bit length as a big-endian 64-bit int.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
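
        # Example (added for clarity): the 11-byte message b"hello world" pads
        # to exactly one 64-byte block:
        #     len(SHA256.preprocessing(b"hello world")) == 64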
    def final_hash(self) -> None:
        # Split the preprocessed data into 64-byte blocks
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        # Right-rotate a 32-bit integer by the given number of bit positions
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
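
        # Worked example (added for illustration): rotating 0b1 right by one
        # position in 32 bits moves the low bit to the top:
        #     ror(1, 1) == 0x80000000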
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 21
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
__lowerCAmelCase = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor ranging [0, 1], outputs a bit tensor ranging [-1, 1]."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits in [-1, 1], outputs an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
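

# Round-trip sketch (added for illustration): an image tensor in [0, 1]
# survives decimal_to_bits -> bits_to_decimal exactly up to 8-bit quantization:
#     x = torch.rand(1, 3, 4, 4)
#     assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)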
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="epsilon" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ):
_snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_snake_case, _snake_case = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 )
else:
_snake_case = None
# 1. compute alphas, betas
_snake_case = self.alphas_cumprod[t]
_snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
_snake_case = 1 - alpha_prod_t
_snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_snake_case = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_snake_case = 0
if t > 0:
_snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_SCREAMING_SNAKE_CASE ).to(model_output.device )
_snake_case = (self._get_variance(_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE ) ** 0.5) * noise
_snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE )
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ) -> None:
        super().__init__()
        self.bit_scale = bit_scale
        # Swap in the bit-aware step function that clips x_0 to the bit scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
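

# Usage sketch (model objects are hypothetical, for illustration only):
#     pipe = BitDiffusion(unet=my_unet, scheduler=DDIMScheduler())
#     image = pipe(height=64, width=64, num_inference_steps=10).images[0]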
| 341
| 0
|
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 60_08_51_47_51_43 ):
"""simple docstring"""
try:
_a = int(_lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
_a = 1
_a = 2
while i * i <= n:
while n % i == 0:
_a = i
n //= i
i += 1
if n > 1:
_a = n
return int(_lowerCAmelCase )
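

# Worked example (added for clarity): 13195 = 5 * 7 * 13 * 29,
# so solution(13195) returns 29.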
if __name__ == "__main__":
print(f'{solution() = }')
| 356
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
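

# Minimal usage sketch (path is hypothetical, for illustration only):
#     JsonDatasetWriter(my_dataset, "out.jsonl", batch_size=1000).write()
# writes the dataset as JSON lines and returns the number of bytes written.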
| 153
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
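
    # Design note: only the names listed in _import_structure are registered at
    # import time; _LazyModule defers loading each submodule until the first
    # attribute access.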
| 15
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 283
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> List[Any]:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
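
    # Usage sketch (shapes are illustrative): calling the processor on one
    # video of 32 HxWx3 uint8 frames returns a BatchFeature whose
    # "pixel_values" has shape (1, 32, 3, 224, 224) with the default crop size.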
| 350
|
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self) -> Tuple:
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 76
| 0
|
"""
Checks whether two integers have opposite signs, using the XOR sign-bit trick.
"""


def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
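

# Why this works: in two's complement, the sign bit of num1 ^ num2 is set
# exactly when the operands' sign bits differ. For example:
#     different_signs(1, -1)   -> True
#     different_signs(-10, -5) -> False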
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue

        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 279
| 0
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
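

# Example (added for clarity):
#     rename_key("visual_encoder.blocks.0.attn.qkv.weight")
# returns "vision_model.encoder.layers.0.self_attn.qkv.weight".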
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_lowerCamelCase : Any = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 159
|
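# The conversion script above walks a pretrained checkpoint's state dict,
# pops every key, rewrites it with regex substitutions and reinserts it before
# loading the result into the Hugging Face model. A minimal standalone sketch
# of that pop-rename-reinsert loop (the toy keys and the single rule here are
# invented for illustration, not taken from the original checkpoint):
import re
def demo_rename_key(key):
    return re.sub(r"visual_encoder", "vision_model.encoder", key)
state_dict = {"visual_encoder.blocks.0.weight": 1, "text_decoder.bias": 2}
for key in list(state_dict):  # iterate over a copy so we can mutate the dict
    value = state_dict.pop(key)
    state_dict[demo_rename_key(key)] = value
assert "vision_model.encoder.blocks.0.weight" in state_dict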
def _UpperCAmelCase (UpperCamelCase_ : str , UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : str = len(UpperCamelCase_ ) + 1
_lowerCAmelCase : List[Any] = len(UpperCamelCase_ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_lowerCAmelCase : List[Any] = [[0 for i in range(UpperCamelCase_ )] for j in range(UpperCamelCase_ )]
    # since a string of zero length matches a pattern of zero length
_lowerCAmelCase : Optional[int] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , UpperCamelCase_ ):
_lowerCAmelCase : Optional[Any] = 0
    # a string of zero length can still match a non-empty pattern when every
    # literal in the pattern is followed by "*" (each pair matches zero chars)
for j in range(1 , UpperCamelCase_ ):
_lowerCAmelCase : Tuple = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , UpperCamelCase_ ):
for j in range(1 , UpperCamelCase_ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_lowerCAmelCase : Dict = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_lowerCAmelCase : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_lowerCAmelCase : int = dp[i - 1][j]
else:
_lowerCAmelCase : List[str] = 0
else:
_lowerCAmelCase : List[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_lowerCamelCase : Any = "aab"
_lowerCamelCase : List[str] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
| 159
| 1
|
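# The function above fills dp[i][j] = 1 iff the first i characters of the input
# string match the first j characters of the pattern. For patterns built only
# from literals, "." and "*", the result should agree with Python's own regex
# engine; a quick cross-check (the extra cases here are illustrative additions):
import re
def reference_match(s, p):
    return re.fullmatch(p, s) is not None
for s, p, expected in [("aab", "c*a*b", True), ("ab", ".*", True), ("aa", "a", False)]:
    assert reference_match(s, p) is expected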
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = args.log_outputs
__a = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
__a = load_metric('''wer''' )
__a = load_metric('''cer''' )
# compute metrics
__a = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
__a = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
__a = f'WER: {wer_result}\nCER: {cer_result}'
print(_UpperCAmelCase )
with open(f'{dataset_id}_eval_results.txt' , '''w''' ) as f:
f.write(_UpperCAmelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__a = f'log_{dataset_id}_predictions.txt'
__a = f'log_{dataset_id}_targets.txt'
with open(_UpperCAmelCase , '''w''' ) as p, open(_UpperCAmelCase , '''w''' ) as t:
# mapping function to write output
def write_to_file(_UpperCAmelCase , _UpperCAmelCase ):
p.write(f'{i}' + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f'{i}' + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(_UpperCAmelCase , with_indices=_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = '''[,?.!\-\;\:\"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__a = re.sub(_UpperCAmelCase , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. by removing newline characters etc.
# note that order is important here!
__a = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
__a = ''' '''.join(text.split(_UpperCAmelCase ) )
return text
def __snake_case ( _UpperCAmelCase ):
# load dataset
__a = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_UpperCAmelCase )
    # for testing: uncomment the next line to only process the first few examples
# dataset = dataset.select(range(10))
# load processor
__a = AutoFeatureExtractor.from_pretrained(args.model_id )
__a = feature_extractor.sampling_rate
# resample audio
__a = dataset.cast_column('''audio''' , Audio(sampling_rate=_UpperCAmelCase ) )
# load eval pipeline
if args.device is None:
__a = 0 if torch.cuda.is_available() else -1
__a = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(_UpperCAmelCase ):
__a = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__a = prediction['''text''']
__a = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
__a = dataset.map(_UpperCAmelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
__snake_case :Tuple = parser.parse_args()
main(args)
| 49
|
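# The evaluation script above lower-cases the reference transcript, strips a
# fixed punctuation set and collapses whitespace before scoring WER/CER. A
# reduced standalone version of that normalization (the character class below
# is a shortened stand-in for the full set used in the script):
import re
def normalize_text_demo(text):
    text = re.sub(r"[,?.!\-;:\"]", "", text.lower())
    for token in ["\n\n", "\n", "  "]:  # longest separators first
        text = " ".join(text.split(token))
    return text
assert normalize_text_demo("Hello,\nWORLD!") == "hello world"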
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ):
A = 1.0 if scale is None else scale
A = 0.0 if loc is None else loc
super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] )
@property
def lowerCamelCase ( self :Any ):
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCamelCase ( self :Optional[int] ):
return self.base_dist.variance * self.scale**2
@property
def lowerCamelCase ( self :Dict ):
return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ):
super().__init__(**__UpperCamelCase )
A = args_dim
A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] )
A = domain_map
def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ):
A = [proj(__UpperCamelCase ) for proj in self.proj]
return self.domain_map(*__UpperCamelCase )
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int ):
super().__init__()
A = function
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ):
return self.function(__UpperCamelCase , *__UpperCamelCase )
class _UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :Any , __UpperCamelCase :int = 1 ):
A = dim
A = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ):
if self.dim == 1:
return self.distribution_class(*__UpperCamelCase )
else:
return Independent(self.distribution_class(*__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ):
A = self._base_distribution(__UpperCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim )
@property
def lowerCamelCase ( self :List[Any] ):
return () if self.dim == 1 else (self.dim,)
@property
def lowerCamelCase ( self :Tuple ):
return len(self.event_shape )
@property
def lowerCamelCase ( self :int ):
return 0.0
def lowerCamelCase ( self :str , __UpperCamelCase :int ):
return ParameterProjection(
in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ):
raise NotImplementedError()
@staticmethod
def lowerCamelCase ( __UpperCamelCase :torch.Tensor ):
return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
UpperCamelCase = StudentT
@classmethod
def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
A = 2.0 + cls.squareplus(__UpperCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"loc": 1, "scale": 1}
UpperCamelCase = Normal
@classmethod
def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"total_count": 1, "logits": 1}
UpperCamelCase = NegativeBinomial
@classmethod
def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ):
A, A = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase )
else:
return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ):
A, A = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 292
| 0
|
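# The parameter-projection classes above constrain raw network outputs to valid
# distribution parameters with squareplus(x) = (x + sqrt(x**2 + 4)) / 2, a
# smooth map onto the strictly positive reals (similar to softplus). A quick
# standalone check of its key properties:
import torch
def squareplus(x):
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
x = torch.linspace(-10, 10, steps=101)
y = squareplus(x)
assert torch.all(y > 0)  # strictly positive everywhere
assert torch.allclose(squareplus(torch.zeros(1)), torch.ones(1))  # squareplus(0) == 1
assert torch.all(y[1:] > y[:-1])  # monotonically increasing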
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) + 1
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__SCREAMING_SNAKE_CASE = [[0 for i in range(UpperCamelCase_ )] for j in range(UpperCamelCase_ )]
    # since a string of zero length matches a pattern of zero length
__SCREAMING_SNAKE_CASE = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = 0
    # a string of zero length can still match a non-empty pattern when every
    # literal in the pattern is followed by "*" (each pair matches zero chars)
for j in range(1 , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , UpperCamelCase_ ):
for j in range(1 , UpperCamelCase_ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__SCREAMING_SNAKE_CASE = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__SCREAMING_SNAKE_CASE = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__SCREAMING_SNAKE_CASE = dp[i - 1][j]
else:
__SCREAMING_SNAKE_CASE = 0
else:
__SCREAMING_SNAKE_CASE = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__magic_name__ = "aab"
__magic_name__ = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 255
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__magic_name__ = False
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self , lowerCAmelCase__=3_2):
set_seed(0)
__SCREAMING_SNAKE_CASE = UNetaDModel(sample_size=lowerCAmelCase__ , in_channels=3 , out_channels=3)
__SCREAMING_SNAKE_CASE = torch.optim.SGD(model.parameters() , lr=0.00_01)
return model, optimizer
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
__SCREAMING_SNAKE_CASE = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=lowerCAmelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
__SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2)).clip(-1 , 1).to(lowerCAmelCase__) for _ in range(4)]
__SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2)).to(lowerCAmelCase__) for _ in range(4)]
__SCREAMING_SNAKE_CASE = [torch.randint(0 , 1_0_0_0 , (4,)).long().to(lowerCAmelCase__) for _ in range(4)]
# train with a DDPM scheduler
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2)
model.train().to(lowerCAmelCase__)
for i in range(4):
optimizer.zero_grad()
__SCREAMING_SNAKE_CASE = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , timesteps[i]).sample
__SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2)
model.train().to(lowerCAmelCase__)
for i in range(4):
optimizer.zero_grad()
__SCREAMING_SNAKE_CASE = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , timesteps[i]).sample
__SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5))
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5))
| 255
| 1
|
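# Both schedulers in the test above corrupt clean images with the same
# closed-form forward process x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps,
# which is why the two training runs can share noise batches. A minimal
# re-derivation of add_noise for a linear beta schedule (a sketch mirroring the
# test's 1000-step configuration, not the library implementation itself):
import torch
betas = torch.linspace(1e-4, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
def add_noise_demo(x0, noise, t):
    a_bar = alphas_cumprod[t]
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise
x0 = torch.randn(4, 3, 32, 32).clip(-1, 1)
noisy = add_noise_demo(x0, torch.randn_like(x0), t=500)
assert noisy.shape == x0.shape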
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def _lowerCAmelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> list[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[0] * no_of_processes
_SCREAMING_SNAKE_CASE =[0] * no_of_processes
    # Initialize remaining_time to burst_time.
for i in range(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =burst_time[i]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
    # While processes remain uncompleted:
    # a process whose arrival time has passed and that still has remaining
    # execution time is put into ready_process; the shortest process in
    # ready_process (target_process) is executed next.
while completed != no_of_processes:
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =-1
for i in range(_UpperCamelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
_SCREAMING_SNAKE_CASE =ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_SCREAMING_SNAKE_CASE =i
total_time += burst_time[target_process]
completed += 1
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =(
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def _lowerCAmelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : list[int] ) -> list[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[0] * no_of_processes
for i in range(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
lowerCamelCase : Optional[Any] = 4
lowerCamelCase : List[str] = [2, 5, 3, 7]
lowerCamelCase : int = [0, 0, 0, 0]
lowerCamelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase : int = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 47
|
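# For the first test case above (burst = [2, 5, 3, 7], all arrivals at t = 0)
# the shortest job always runs first, so the completion times are 2, 5, 10 and
# 17 and waiting_time[i] = completion - arrival - burst gives [0, 5, 2, 10],
# a mean wait of 4.25. A tiny standalone check of that arithmetic:
burst = [2, 5, 3, 7]
order = sorted(range(4), key=lambda i: burst[i])  # run order: P1, P3, P2, P4
t, waiting = 0, [0] * 4
for i in order:
    waiting[i] = t  # each job waits for everything scheduled before it
    t += burst[i]
assert waiting == [0, 5, 2, 10]
assert sum(waiting) / 4 == 4.25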
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class _lowerCamelCase ( _lowercase ):
def __init__(self , __a = None , __a = None , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ) -> Optional[Any]:
UpperCamelCase = path_or_paths
UpperCamelCase = split if split or isinstance(__a , __a ) else "train"
UpperCamelCase = features
UpperCamelCase = cache_dir
UpperCamelCase = keep_in_memory
UpperCamelCase = streaming
UpperCamelCase = num_proc
UpperCamelCase = kwargs
@abstractmethod
def snake_case_ (self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class _lowerCamelCase ( _lowercase ):
def __init__(self , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ) -> Any:
UpperCamelCase = features
UpperCamelCase = cache_dir
UpperCamelCase = keep_in_memory
UpperCamelCase = streaming
UpperCamelCase = num_proc
UpperCamelCase = kwargs
@abstractmethod
def snake_case_ (self ) -> Union[Dataset, IterableDataset]:
pass
| 153
| 0
|
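# The abstract base classes above fix the reader contract: the constructor only
# records options and the abstract read() is what materializes a dataset. A toy
# illustration of that shape (ToyReader/ListReader are invented names; the real
# subclasses build Dataset objects from CSV, JSON and similar sources):
from abc import ABC, abstractmethod
class ToyReader(ABC):
    def __init__(self, source, split="train"):
        self.source = source
        self.split = split
    @abstractmethod
    def read(self):
        ...
class ListReader(ToyReader):
    def read(self):
        return {"split": self.split, "rows": list(self.source)}
assert ListReader([1, 2, 3]).read() == {"split": "train", "rows": [1, 2, 3]}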
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Union[str, Any] = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
UpperCamelCase__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
A_ : Union[str, Any] = """https://pypi.org/pypi/diffusers/json"""
A_ : Optional[int] = json.loads(request.urlopen(a_ ).read() )["""releases"""].keys()
return sorted(a_ , key=lambda a_ : version.Version(a_ ) )
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(a_ )
os.makedirs(a_ , exist_ok=a_ )
A_ : Dict = Path(a_ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
init_hf_modules()
A_ : Tuple = Path(a_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(a_ , exist_ok=a_ )
A_ : Optional[int] = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
with open(a_ , """r""" , encoding="""utf-8""" ) as f:
A_ : int = f.read()
# Imports of the form `import .xxx`
    A_ : Optional[int] = re.findall(r"""^\s*import\s+\.(\S+)\s*$""" , a_ , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""" , a_ , flags=re.MULTILINE )
# Unique-ify
return list(set(a_ ) )
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : Optional[int] = False
A_ : List[Any] = [module_file]
A_ : int = []
# Let's recurse through all relative imports
while not no_change:
A_ : Optional[int] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(a_ ) )
A_ : str = Path(a_ ).parent
A_ : Optional[int] = [str(module_path / m ) for m in new_imports]
A_ : int = [f for f in new_import_files if f not in all_relative_imports]
A_ : Optional[int] = [F"{f}.py" for f in new_import_files]
A_ : Optional[Any] = len(a_ ) == 0
all_relative_imports.extend(a_ )
return all_relative_imports
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
with open(a_ , """r""" , encoding="""utf-8""" ) as f:
A_ : str = f.read()
# Imports of the form `import xxx`
    A_ : Dict = re.findall(r"""^\s*import\s+(\S+)\s*$""" , a_ , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"""^\s*from\s+(\S+)\s+import""" , a_ , flags=re.MULTILINE )
# Only keep the top-level module
A_ : Optional[Any] = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
A_ : Dict = list(set(a_ ) )
A_ : Optional[Any] = []
for imp in imports:
try:
importlib.import_module(a_ )
except ImportError:
missing_packages.append(a_ )
if len(a_ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
F"{', '.join(a_ )}. Run `pip install {' '.join(a_ )}`" )
return get_relative_imports(a_ )
def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : Tuple = module_path.replace(os.path.sep , """.""" )
A_ : Tuple = importlib.import_module(a_ )
if class_name is None:
return find_pipeline_class(a_ )
return getattr(a_ , a_ )
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
A_ : List[str] = dict(inspect.getmembers(a_ , inspect.isclass ) )
A_ : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , a_ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
F" {loaded_module}." )
A_ : Optional[Any] = cls
return pipeline_class
def UpperCAmelCase ( a_ , a_ , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , a_ = None , a_ = False , ) -> List[Any]:
"""simple docstring"""
A_ : List[str] = str(a_ )
A_ : Tuple = os.path.join(a_ , a_ )
if os.path.isfile(a_ ):
A_ : int = module_file_or_url
A_ : List[str] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
A_ : Optional[Any] = get_diffusers_versions()
# cut ".dev0"
A_ : Optional[Any] = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
A_ : Dict = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
A_ : List[Any] = F"v{revision}"
elif revision == "main":
A_ : Union[str, Any] = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
A_ : int = COMMUNITY_PIPELINES_URL.format(revision=a_ , pipeline=a_ )
try:
A_ : List[Any] = cached_download(
a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , )
A_ : List[str] = """git"""
A_ : List[str] = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
A_ : Optional[Any] = hf_hub_download(
a_ , a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , )
A_ : List[Any] = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
A_ : Dict = check_imports(a_ )
# Now we move the module inside our cached dynamic modules.
A_ : Optional[Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(a_ )
A_ : Optional[int] = Path(a_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(a_ , submodule_path / module_file )
for module_needed in modules_needed:
A_ : str = F"{module_needed}.py"
shutil.copy(os.path.join(a_ , a_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(a_ , a_ ):
A_ : Any = use_auth_token
elif use_auth_token is True:
A_ : Dict = HfFolder.get_token()
else:
A_ : Tuple = None
A_ : Any = model_info(a_ , revision=a_ , token=a_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A_ : List[str] = submodule_path / commit_hash
A_ : Optional[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(a_ )
if not (submodule_path / module_file).exists():
shutil.copy(a_ , submodule_path / module_file )
        # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
a_ , F"{module_needed}.py" , cache_dir=a_ , force_download=a_ , resume_download=a_ , proxies=a_ , use_auth_token=a_ , revision=a_ , local_files_only=a_ , )
return os.path.join(a_ , a_ )
def UpperCAmelCase ( a_ , a_ , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , a_ = None , a_ = False , **a_ , ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[Any] = get_cached_module_file(
a_ , a_ , cache_dir=a_ , force_download=a_ , resume_download=a_ , proxies=a_ , use_auth_token=a_ , revision=a_ , local_files_only=a_ , )
return get_class_in_module(a_ , final_module.replace(""".py""" , """""" ) )
| 164
|
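# get_relative_imports above scans a module's source text for `import .x` and
# `from .x import y` statements so a community pipeline's sibling files can be
# cached alongside it. A standalone re-run of those two regexes on an in-memory
# module body (the file content below is a made-up example):
import re
content = "from .utils import helper\nimport .layers\nimport torch\n"
relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
assert sorted(set(relative)) == ["layers", "utils"]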
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = ['''input_values''', '''attention_mask''']
def __init__( self , _lowerCamelCase = 1 , _lowerCamelCase = 1_6000 , _lowerCamelCase = 0.0 , _lowerCamelCase = False , _lowerCamelCase = 80 , _lowerCamelCase = 16 , _lowerCamelCase = 64 , _lowerCamelCase = "hann_window" , _lowerCamelCase = 1.0 , _lowerCamelCase = 80 , _lowerCamelCase = 7600 , _lowerCamelCase = 1e-10 , _lowerCamelCase = 2 , _lowerCamelCase = True , **_lowerCamelCase , ) -> List[Any]:
super().__init__(feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , **_lowerCamelCase )
A_ : List[Any] = do_normalize
A_ : Union[str, Any] = return_attention_mask
A_ : Tuple = num_mel_bins
A_ : List[str] = hop_length
A_ : int = win_length
A_ : Optional[int] = win_function
A_ : List[Any] = frame_signal_scale
A_ : str = fmin
A_ : Optional[Any] = fmax
A_ : Any = mel_floor
A_ : Any = reduction_factor
A_ : Tuple = win_length * sampling_rate // 1000
A_ : Dict = hop_length * sampling_rate // 1000
A_ : Dict = optimal_fft_length(self.sample_size )
A_ : str = (self.n_fft // 2) + 1
A_ : int = window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCamelCase )
A_ : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
if frame_signal_scale != 1.0:
warnings.warn(
"""The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" , _lowerCamelCase , )
if reduction_factor != 2.0:
warnings.warn(
"""The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" , _lowerCamelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
A_ : Dict = np.array(_lowerCamelCase , np.intaa )
A_ : Dict = []
for vector, length in zip(_lowerCamelCase , attention_mask.sum(-1 ) ):
A_ : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
A_ : Any = padding_value
normed_input_values.append(_lowerCamelCase )
else:
A_ : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase_ ( self , _lowerCamelCase , ) -> np.ndarray:
A_ : int = spectrogram(
_lowerCamelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="""log10""" , )
return log_mel_spec.T
def __call__( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if audio is not None:
A_ : Dict = self._process_audio(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , )
else:
A_ : Optional[int] = None
if audio_target is not None:
A_ : Union[str, Any] = self._process_audio(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , )
if inputs is None:
return inputs_target
else:
A_ : Optional[int] = inputs_target["""input_values"""]
A_ : Tuple = inputs_target.get("""attention_mask""" )
if decoder_attention_mask is not None:
A_ : int = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ) -> BatchFeature:
A_ : Optional[int] = isinstance(_lowerCamelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
A_ : List[str] = is_batched_numpy or (
isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Union[str, Any] = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ):
A_ : List[str] = np.asarray(_lowerCamelCase , dtype=np.floataa )
elif isinstance(_lowerCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
A_ : Optional[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : int = [speech]
# needed to make pad() work on spectrogram inputs
A_ : List[Any] = self.feature_size
# convert into correct format for padding
if is_target:
A_ : Tuple = [self._extract_mel_features(_lowerCamelCase ) for waveform in speech]
A_ : Tuple = BatchFeature({"""input_values""": features} )
A_ : Dict = self.num_mel_bins
else:
A_ : Union[str, Any] = BatchFeature({"""input_values""": speech} )
A_ : Tuple = self.pad(
_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
A_ : Union[str, Any] = feature_size_hack
# convert input values to correct format
A_ : str = padded_inputs["""input_values"""]
if not isinstance(input_values[0] , np.ndarray ):
A_ : str = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_lowerCamelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
A_ : Tuple = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_lowerCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
A_ : List[Any] = input_values.astype(np.floataa )
# convert attention_mask to correct format
A_ : Any = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
A_ : str = [np.asarray(_lowerCamelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
A_ : Any = (
attention_mask
if self._get_padding_strategies(_lowerCamelCase , max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A_ : Any = self.zero_mean_unit_var_norm(
padded_inputs["""input_values"""] , attention_mask=_lowerCamelCase , padding_value=self.padding_value )
if return_tensors is not None:
A_ : Dict = padded_inputs.convert_to_tensors(_lowerCamelCase )
return padded_inputs
def UpperCAmelCase_ ( self ) -> Dict[str, Any]:
A_ : List[Any] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
A_ : Optional[int] = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
for name in names:
if name in output:
del output[name]
return output
| 164
| 1
|
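# zero_mean_unit_var_norm above standardizes each waveform over its attended
# length only and then overwrites the padded tail with padding_value. A
# condensed single-sequence version of that computation:
import numpy as np
def normalize_demo(vector, length, padding_value=0.0):
    normed = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
    normed[length:] = padding_value  # padded positions carry no signal
    return normed
x = np.concatenate([np.random.randn(100) * 3 + 5, np.zeros(20)])
y = normalize_demo(x, length=100)
assert abs(y[:100].mean()) < 1e-6 and abs(y[:100].std() - 1.0) < 1e-3
assert np.all(y[100:] == 0.0)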
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(_a ):
print(F"""{i}\t\t{d}""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
for j in range(_a ):
        u , v , w = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = [float('''inf''' )] * vertex_count
UpperCAmelCase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_a ):
            u , v , w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
UpperCAmelCase = check_negative_cycle(_a , _a , _a )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[int] = int(input("Enter number of vertices: ").strip())
__A : Any = int(input("Enter number of edges: ").strip())
__A : str = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
__A , __A , __A : Any = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
__A : Any = {"src": src, "dst": dest, "weight": weight}
__A : List[str] = int(input("\nEnter shortest path source:").strip())
__A : Tuple = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 273
|
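# bellman_ford above relaxes every edge V - 1 times and then makes one extra
# pass to detect negative cycles. A non-interactive check of the relaxation
# step on a small graph (the four edges are invented for illustration):
import math
demo_edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
    {"src": 2, "dst": 3, "weight": 2},
]
dist = [math.inf] * 4
dist[0] = 0.0
for _ in range(3):  # V - 1 relaxation rounds
    for e in demo_edges:
        if dist[e["src"]] + e["weight"] < dist[e["dst"]]:
            dist[e["dst"]] = dist[e["src"]] + e["weight"]
assert dist == [0, 4, 1, 3]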
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , a : int , a : Optional[int]=13 , a : Optional[int]=3 , a : int=224 , a : Optional[int]=30 , a : int=400 , a : Union[str, Any]=True , a : int=None , a : Tuple=True , a : Tuple=[0.5, 0.5, 0.5] , a : Optional[int]=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : str = max_resolution
SCREAMING_SNAKE_CASE : int = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : int = do_normalize
SCREAMING_SNAKE_CASE : Tuple = image_mean
SCREAMING_SNAKE_CASE : Tuple = image_std
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =ViTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = EfficientFormerImageProcessorTester(self )
@property
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 76
| 0
|
from math import factorial
UpperCAmelCase_ = {str(d): factorial(d) for d in range(10)}
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(lowerCamelCase_ ) )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , lowerCamelCase_ ) if sum_of_digit_factorial(lowerCamelCase_ ) == i )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 367
|
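# The search limit 7 * factorial(9) + 1 above comes from a counting argument:
# an n-digit number is at least 10 ** (n - 1), while its digit-factorial sum is
# at most n * 9!; for n = 8 that cap is 8 * 9! = 2,903,040 < 10 ** 7, so no
# candidate can have eight or more digits. Verifying the bound:
from math import factorial
assert 8 * factorial(9) < 10**7  # 2_903_040 < 10_000_000
assert 7 * factorial(9) + 1 == 2_540_161  # the range bound used above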
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 29
| 0
|
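# parse_flag_from_env above reads a yes/no environment variable, falling back
# to a default when it is unset; RUN_SLOW is what gates the @slow-decorated
# tests. A standalone sketch of the same pattern (FLAG_DEMO is an invented
# variable name; note distutils.util.strtobool is deprecated in Python 3.12):
import os
from distutils.util import strtobool
def parse_flag_demo(key, default=False):
    value = os.environ.get(key)
    if value is None:
        return default
    return bool(strtobool(value))  # raises ValueError for anything but yes/no
os.environ["FLAG_DEMO"] = "yes"
assert parse_flag_demo("FLAG_DEMO") is True
assert parse_flag_demo("FLAG_DEMO_UNSET") is False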
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_input_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 159
|
import os
def solution() -> int:
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
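
# A self-contained sketch of the same bottom-up recurrence on an inline
# triangle (the values are made up for illustration, no triangle.txt needed):
# each cell receives the larger of its two parents, so the last row ends up
# holding the best path totals.
def max_path_total(triangle: list) -> int:
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


assert max_path_total([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23  # 3+7+4+9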
if __name__ == "__main__":
print(solution())
| 159
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{token} \n")

        return out_vocab_file, out_monolingual_vocab_file
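
# A quick sketch of the special-token layout the methods above produce, using
# made-up token ids (real ids come from the loaded vocab files). BartPho
# follows the BART convention: <s> A </s> for one sequence and
# <s> A </s></s> B </s> for a pair, with an all-zero token_type_ids vector.
def _build_with_special_tokens(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]


assert _build_with_special_tokens(0, 2, [10, 11]) == [0, 10, 11, 2]
assert _build_with_special_tokens(0, 2, [10, 11], [12]) == [0, 10, 11, 2, 2, 12, 2]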
| 355
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
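
# A tiny standalone sketch of the seeding pattern the tests above rely on
# (not part of the original test file): two `torch.Generator`s seeded
# identically yield identical draws, which is why pipeline output can be
# compared against hard-coded slices.
def _seeded_draws_match() -> bool:
    g1 = torch.Generator(device="cpu").manual_seed(42)
    g2 = torch.Generator(device="cpu").manual_seed(42)
    return torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))


assert _seeded_draws_match()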
| 207
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
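
# A small sketch of how the sparse-step arithmetic above can map to concrete
# layer indices: with 12 layers and 3 sparse layers the step is 4, so under an
# "every k-th layer" placement (one plausible convention; the authoritative
# mapping lives in the modeling code) layers 3, 7 and 11 become sparse MoE
# blocks. The helper is illustrative only, not part of the config class.
def _sparse_layer_indices(num_layers: int, num_sparse_layers: int) -> list:
    if num_sparse_layers == 0:
        return []
    step = num_layers // num_sparse_layers
    return [i for i in range(num_layers) if (i + 1) % step == 0]


assert _sparse_layer_indices(12, 3) == [3, 7, 11]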
| 255
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string(`target`) can be constructed from
    the given list of substrings(`word_bank`)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
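
# An equivalent top-down formulation, sketched here for comparison with the
# tabulation above: memoize on the remaining suffix of `target` instead of
# filling a table. The result order may differ from `all_construct`.
def all_construct_memo(
    target: str, word_bank: list[str], memo: dict[str, list[list[str]]] | None = None
) -> list[list[str]]:
    if memo is None:
        memo = {}
    if target in memo:
        return memo[target]
    if target == "":
        return [[]]
    result: list[list[str]] = []
    for word in word_bank:
        if target.startswith(word):
            for way in all_construct_memo(target[len(word) :], word_bank, memo):
                result.append([word, *way])
    memo[target] = result
    return result


print(all_construct_memo("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))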
| 255
| 1
|
'''simple docstring'''
import argparse
import os
import re
_lowercase : List[str] = "src/diffusers"
# Pattern that looks at the indentation in a line.
_lowercase : Union[str, Any] = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowercase : str = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowercase : str = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowercase : Optional[Any] = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowercase : str = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` function (mapping an object to a string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
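
# A quick illustration of the ordering `sort_objects` produces (names made
# up): constants first, then classes, then functions, each group sorted
# case-insensitively with underscores ignored.
assert sort_objects(["load_model", "CONFIG", "Trainer", "BATCH", "utils"]) == [
    "BATCH",
    "CONFIG",
    "Trainer",
    "load_model",
    "utils",
]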
def sort_objects_in_import(import_statement):
    """Sorts the imports in a single import statement."""

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
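
# A minimal demonstration of `sort_objects_in_import` on an invented one-line
# entry: the bracketed names are re-emitted in sorted order (constants, then
# classes, then functions).
_demo = '    "models.bert": ["BertModel", "BERT_PRETRAINED", "load_bert"],'
print(sort_objects_in_import(_demo))
# -> '    "models.bert": ["BERT_PRETRAINED", "BertModel", "load_bert"],'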
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_lowercase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 264
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph
            m_edges - the list of edges
            m_component - the dictionary which stores the index of the component
            a node belongs to
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the component index of a given node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates a new component index throughout a given component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Finds the roots of the components of two nodes, compares the components
        in terms of size, and attaches the smaller one to the larger one to form
        a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # If two nodes belong to two different components, check whether
                    # the edge is the current minimum for either component and record it.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
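
# A small usage sketch for the Graph class above (edge weights invented):
# build a 4-node graph and run Borůvka's algorithm, which prints each edge as
# it is added and the final MST weight.
def _demo_boruvka() -> None:
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(0, 2, 2)
    g.add_edge(2, 3, 3)
    g.add_edge(1, 3, 4)
    g.boruvka()  # expected MST weight: 1 + 2 + 3 = 6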
def test_vector() -> None:
    """A placeholder for doctest-style checks of the Graph class above."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 264
| 1
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 164
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__A = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so that the next test can be timed without download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 164
| 1
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1000000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
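
# A quick sanity check against the statement of Project Euler 50: below one
# thousand, the prime 953 is the longest sum of consecutive primes (21 terms),
# so solution(1000) should return 953.
assert solution(1000) == 953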
if __name__ == "__main__":
print(f"""{solution() = }""")
| 227
|
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
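
# A short standalone illustration of the cosine schedule computed above (not
# part of the original scheduler file): with 5 diffusion steps the betas
# increase monotonically and stay below 1 thanks to the max_beta cap.
_betas = betas_for_alpha_bar(5)
assert len(_betas) == 5
assert bool((_betas[1:] >= _betas[:-1]).all()) and float(_betas.max()) < 1.0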
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
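
# Minimal numpy sketch of the inverted-DDIM update implemented in `step` above
# for the "epsilon" prediction type:
#     x_next = sqrt(abar_next) * x0_hat + sqrt(1 - abar_next) * eps_hat,
# where x0_hat is recovered from the current sample and the predicted noise.
# The linear beta schedule, the toy tensors and the step ratio are illustrative
# assumptions only.
def _ddim_inverse_step_demo():
    betas = np.linspace(1e-4, 0.02, 1000)
    alphas_cumprod = np.cumprod(1.0 - betas)

    def step(sample, eps_hat, t, step_ratio):
        t_next = t + step_ratio
        a_t = alphas_cumprod[t]
        a_next = alphas_cumprod[t_next] if t_next < len(alphas_cumprod) else alphas_cumprod[-1]
        x0_hat = (sample - (1 - a_t) ** 0.5 * eps_hat) / a_t**0.5
        return a_next**0.5 * x0_hat + (1 - a_next) ** 0.5 * eps_hat

    x = np.zeros(4)
    for t in range(0, 100, 20):  # five inversion steps of a 50-step schedule
        x = step(x, np.full(4, 0.1), t, step_ratio=20)
    return x


if __name__ == "__main__":
    print(_ddim_inverse_step_demo())
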
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """A graph whose edges carry transition probabilities for a Markov chain."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
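    # Hypothetical usage of the Markov chain above; node names and transition
    # probabilities are made up for illustration (outgoing weights sum to 1).
    example_transitions = [
        ("a", "a", 0.9),
        ("a", "b", 0.1),
        ("b", "a", 0.5),
        ("b", "b", 0.5),
    ]
    visited_counts = get_transitions("a", example_transitions, 1000)
    print(visited_counts.most_common())  # "a" should dominate given the weights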
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Try every knight move from `pos`, backtracking when a branch dead-ends."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every start square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
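    # A 5x5 board admits an open knight's tour, so this prints a board whose
    # entries give the order in which each square is visited.
    for row in open_knight_tour(5):
        print(row)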
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
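
# A minimal, runnable sketch of the lazy-import pattern used above: attribute
# access triggers the submodule import instead of paying for every import at
# package load time. `MiniLazyModule` and the stdlib `math` mapping are
# illustrative stand-ins, not the transformers implementation.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute back to the module that defines it
        self._attr_to_module = {attr: module for module, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)


if __name__ == "__main__":
    lazy_math = MiniLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
    print(lazy_math.sqrt(2.0))  # "math" is imported only on this first access
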
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """coefficients[i] is the coefficient of x**i; there must be degree + 1 of them."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
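
# Hypothetical usage of the Polynomial class above.
if __name__ == "__main__":
    p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
    q = Polynomial(1, [0, 2])  # 2x
    print(p + q)  # 3x^2 + 2x + 1
    print(p.evaluate(2))  # 13
    print(p.derivative())  # 6x
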
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
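
# Hypothetical usage of the configuration above. With the `transformers`
# package installed, the same class is exposed publicly as `CvtConfig`:
#
#   from transformers import CvtConfig, CvtModel
#   config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6])
#   model = CvtModel(config)  # randomly initialised 3-stage CvT
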
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """simple docstring"""

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
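
# Toy sketch of the `out_indices` selection performed in `forward` above: the
# backbone yields one feature map per stage and only the requested indices are
# kept. Pure-Python stand-ins; neither timm nor torch is required here.
if __name__ == "__main__":
    stage_outputs = ["stage0", "stage1", "stage2", "stage3"]  # placeholder feature maps
    out_indices = (-1,)  # the default used in __init__ above
    print(tuple(stage_outputs[i] for i in out_indices))  # ('stage3',)
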
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowercase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
def a ( self : Optional[Any] , _lowercase : Union[str, Any] ):
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def a ( self : Optional[int] , _lowercase : Dict ):
__UpperCAmelCase = self.get_masked_index(__a )
__UpperCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def a ( self : List[str] , _lowercase : Optional[int] ):
if isinstance(__a , __a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__a )
def a ( self : int , _lowercase : Optional[Any] , _lowercase : Optional[Any]=None , **_lowercase : Any ):
if return_tensors is None:
__UpperCAmelCase = self.framework
__UpperCAmelCase = self.tokenizer(__a , return_tensors=__a )
self.ensure_exactly_one_mask_token(__a )
return model_inputs
def a ( self : Optional[int] , _lowercase : Optional[Any] ):
__UpperCAmelCase = self.model(**__a )
__UpperCAmelCase = model_inputs['''input_ids''']
return model_outputs
def a ( self : List[str] , _lowercase : str , _lowercase : str=5 , _lowercase : List[Any]=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__UpperCAmelCase = target_ids.shape[0]
__UpperCAmelCase = model_outputs['''input_ids'''][0]
__UpperCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__UpperCAmelCase = outputs.numpy()
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = stable_softmax(__a , axis=-1 )
if target_ids is not None:
__UpperCAmelCase = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) )
__UpperCAmelCase = tf.expand_dims(__a , 0 )
__UpperCAmelCase = tf.math.top_k(__a , k=__a )
__UpperCAmelCase , __UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
__UpperCAmelCase = probs[..., target_ids]
__UpperCAmelCase , __UpperCAmelCase = probs.topk(__a )
__UpperCAmelCase = []
__UpperCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__UpperCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__UpperCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
__UpperCAmelCase = target_ids[p].tolist()
__UpperCAmelCase = p
# Filter padding out:
__UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__UpperCAmelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
__UpperCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__a )
result.append(__a )
if single_mask:
return result[0]
return result
def a ( self : Any , _lowercase : Dict , _lowercase : List[Any]=None ):
if isinstance(__a , __a ):
__UpperCAmelCase = [targets]
try:
__UpperCAmelCase = self.tokenizer.get_vocab()
except Exception:
__UpperCAmelCase = {}
__UpperCAmelCase = []
for target in targets:
__UpperCAmelCase = vocab.get(__a , __a )
if id_ is None:
__UpperCAmelCase = self.tokenizer(
__a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )['''input_ids''']
if len(__a ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
__UpperCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
__UpperCAmelCase = list(set(__a ) )
if len(__a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
__UpperCAmelCase = np.array(__a )
return target_ids
def a ( self : List[str] , _lowercase : str=None , _lowercase : List[str]=None ):
__UpperCAmelCase = {}
if targets is not None:
__UpperCAmelCase = self.get_target_ids(__a , __a )
__UpperCAmelCase = target_ids
if top_k is not None:
__UpperCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : Dict , _lowercase : List[Any] , *_lowercase : int , **_lowercase : List[str] ):
__UpperCAmelCase = super().__call__(__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
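
# Hypothetical end-to-end usage of the pipeline above; requires the
# `transformers` package and downloads a checkpoint on first use:
#
#   from transformers import pipeline
#   fill = pipeline("fill-mask", model="distilroberta-base")
#   print(fill("The capital of France is <mask>.", top_k=3))
#
# Each prediction is a dict with "score", "token", "token_str" and "sequence",
# mirroring the postprocess step implemented above.
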
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
    from doctest import testmod

    testmod()
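    # Example run; repr() makes the trailing padding visible.
    for justified_line in text_justification("This is an example of text justification.", 16):
        print(repr(justified_line))
    # 'This    is    an'
    # 'example  of text'
    # 'justification.  '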
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int=13 , lowercase_ : Optional[int]=7 , lowercase_ : Any=True , lowercase_ : Dict=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : str=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Any=37 , lowercase_ : Tuple="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[Any]=None , ):
snake_case_ : Any = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[Any] = seq_length
snake_case_ : Optional[int] = is_training
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Optional[Any] = use_labels
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : int = type_vocab_size
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : str = initializer_range
snake_case_ : Tuple = num_labels
snake_case_ : str = num_choices
snake_case_ : Any = scope
snake_case_ : Dict = self.vocab_size - 1
def _snake_case ( self : int ):
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = None
if self.use_token_type_ids:
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : str = None
snake_case_ : Dict = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : int = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
snake_case_ : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _snake_case ( self : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , *lowercase_ : Dict ):
snake_case_ : List[Any] = OpenAIGPTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Any = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ )
snake_case_ : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[Any] , *lowercase_ : Optional[Any] ):
snake_case_ : Union[str, Any] = OpenAIGPTLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Union[str, Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Dict , *lowercase_ : Union[str, Any] ):
snake_case_ : Tuple = OpenAIGPTDoubleHeadsModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Dict = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Any , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , *lowercase_ : Any ):
snake_case_ : int = self.num_labels
snake_case_ : Any = OpenAIGPTForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
_lowerCAmelCase : Dict = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _snake_case ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str]=False ):
snake_case_ : Dict = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ , )
snake_case_ : int = inputs_dict['''labels''']
snake_case_ : Optional[Any] = inputs_dict['''labels''']
snake_case_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase_ , )
snake_case_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def _snake_case ( self : Any ):
snake_case_ : List[str] = OpenAIGPTModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def _snake_case ( self : List[str] ):
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[Any] ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase_ )
def _snake_case ( self : List[str] ):
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
def _snake_case ( self : int ):
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase_ )
def _snake_case ( self : List[str] ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ )
@slow
def _snake_case ( self : Dict ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = OpenAIGPTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def _snake_case ( self : Optional[int] ):
snake_case_ : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowercase_ )
snake_case_ : List[str] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase_ ) # the president is
snake_case_ : List[Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
snake_case_ : Optional[Any] = model.generate(lowercase_ , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist() , lowercase_ )
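
# A stand-alone version of the slow integration test above; requires the
# `transformers` package and downloads the openai-gpt checkpoint:
#
#   from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#   input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#   output_ids = model.generate(input_ids, do_sample=False)
#   print(tokenizer.decode(output_ids[0]))
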
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
_lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_lowercase = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_lowercase = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def A (__lowerCamelCase :str ):
_lowerCAmelCase = None
# source code of `config_class`
_lowerCAmelCase = inspect.getsource(__lowerCamelCase )
_lowerCAmelCase = _re_checkpoint.findall(__lowerCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("""/""" ):
_lowerCAmelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
_lowerCAmelCase = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_lowerCAmelCase = ckpt_name
break
return checkpoint
def A ():
_lowerCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
_lowerCAmelCase = get_checkpoint_from_config_class(__lowerCamelCase )
_lowerCAmelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
_lowerCAmelCase = """\n""".join(sorted(__lowerCamelCase ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
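    # Quick illustration of the checkpoint regex above on a sample docstring line.
    sample = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    print(_re_checkpoint.findall(sample))
    # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]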
from math import pow, sqrt
def validate(*values: float) -> bool:
    """Return True when at least one value is given and all values are positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)."""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
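
# Worked example using Graham's law as implemented above: with rate_1/rate_2 =
# sqrt(M_2/M_1), hydrogen (2.016 g/mol) effuses about 3.98 times faster than
# oxygen (32.00 g/mol). The molar masses are standard values used for
# illustration; the argument order follows the reconstruction above.
if __name__ == "__main__":
    print(effusion_ratio(2.016, 32.00))  # ≈ 3.984095
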
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens, or None if it is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """simple docstring"""

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """simple docstring"""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """simple docstring"""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """simple docstring"""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
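
# Small, self-contained demo of the MinHash similarity estimate used above;
# requires the `datasketch` package. The two snippets are illustrative only.
if __name__ == "__main__":
    snippet_a = "def add(a, b):\n    total = a + b\n    return total"
    snippet_b = "def add(x, y):\n    total = x + y\n    return total"
    m1, m2 = MinHash(num_perm=NUM_PERM), MinHash(num_perm=NUM_PERM)
    for tok in get_tokens(snippet_a):
        m1.update(tok.encode())
    for tok in get_tokens(snippet_b):
        m2.update(tok.encode())
    print(m1.jaccard(m2))  # estimated Jaccard similarity of the two token sets
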
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the CNN/DailyMail dataset: one story per file."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate or pad `sequence` so that it is exactly `block_size` tokens long."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Return a mask that is 1 on real tokens and 0 on padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate segment ids (0/1) at each separator token, per sequence in the batch."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
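
# Quick illustration of fit_to_block_size and build_mask above, using toy ids.
if __name__ == "__main__":
    padded = fit_to_block_size([5, 6, 7], 5, pad_token_id=0)
    print(padded)  # [5, 6, 7, 0, 0]
    print(build_mask(torch.tensor(padded), pad_token_id=0))  # tensor([1, 1, 1, 0, 0])
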
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
UpperCAmelCase__ : Dict = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase__ : List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCAmelCase__ : Optional[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCAmelCase__ : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = pipeline(
task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='pt' )
UpperCamelCase : str = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'LABEL_0', 'score': 0.5_04}] )
UpperCamelCase : Dict = text_classifier('This is great !', top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}] )
UpperCamelCase : List[Any] = text_classifier(['This is great !', 'This is bad'], top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), [
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
], )
UpperCamelCase : List[Any] = text_classifier('This is great !', top_k=1 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'LABEL_0', 'score': 0.5_04}] )
# Legacy behavior
UpperCamelCase : str = text_classifier('This is great !', return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'LABEL_0', 'score': 0.5_04}] )
UpperCamelCase : Tuple = text_classifier('This is great !', return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), [[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}]] )
UpperCamelCase : List[Any] = text_classifier(['This is great !', 'Something else'], return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), [
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
], )
UpperCamelCase : Optional[int] = text_classifier(['This is great !', 'Something else'], return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), [
{'label': 'LABEL_0', 'score': 0.5_04},
{'label': 'LABEL_0', 'score': 0.5_04},
], )
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
import torch
UpperCamelCase : str = pipeline(
task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='pt', device=torch.device('cpu' ), )
UpperCamelCase : List[Any] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'LABEL_0', 'score': 0.5_04}] )
@require_tf
def snake_case_ ( self ) -> Dict:
UpperCamelCase : List[Any] = pipeline(
task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='tf' )
UpperCamelCase : str = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'LABEL_0', 'score': 0.5_04}] )
@slow
@require_torch
def snake_case_ ( self ) -> int:
UpperCamelCase : str = pipeline('text-classification' )
UpperCamelCase : str = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'POSITIVE', 'score': 1.0}] )
UpperCamelCase : List[str] = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'NEGATIVE', 'score': 1.0}] )
UpperCamelCase : int = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'POSITIVE', 'score': 0.9_88}] )
@slow
@require_tf
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : int = pipeline('text-classification', framework='tf' )
UpperCamelCase : Tuple = text_classifier('This is great !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'POSITIVE', 'score': 1.0}] )
UpperCamelCase : Union[str, Any] = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'NEGATIVE', 'score': 1.0}] )
UpperCamelCase : int = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': 'POSITIVE', 'score': 0.9_88}] )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : str = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase : List[str] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
UpperCamelCase : List[str] = 'HuggingFace is in'
UpperCamelCase : Tuple = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
UpperCamelCase : List[Any] = ['HuggingFace is in ', 'Paris is in France']
UpperCamelCase : List[str] = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}, {'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}], )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
UpperCamelCase : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE_, top_k=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), [[{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}] * N], )
UpperCamelCase : str = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
UpperCamelCase : Union[str, Any] = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), {'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}, )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
UpperCamelCase : List[str] = [['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
text_classifier(SCREAMING_SNAKE_CASE_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
UpperCamelCase : List[str] = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ), [{'label': ANY(SCREAMING_SNAKE_CASE_ ), 'score': ANY(SCREAMING_SNAKE_CASE_ )}], )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
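
# A minimal standalone sketch of the non-legacy API the tests above exercise, assuming
# the same tiny test checkpoint is reachable; `top_k=None` returns one {label, score}
# dict per label instead of only the best one.
if __name__ == "__main__":
    classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
    print(classifier("This is great !"))
    print(classifier("This is great !", top_k=None))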
| 103
| 0
|
"""Nearest-neighbour image scaling (no interpolation)."""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination x coordinate back to the source image."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination y coordinate back to the source image."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
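
# A vectorized sketch of the same nearest-neighbour mapping using NumPy fancy indexing;
# it computes the identical output to NearestNeighbour.process() without Python loops.
def resize_nearest(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    ys = (np.arange(dst_h) * (img.shape[0] / dst_h)).astype(int)  # source row per output row
    xs = (np.arange(dst_w) * (img.shape[1] / dst_w)).astype(int)  # source col per output col
    return img[ys[:, None], xs[None, :]]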
| 83
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
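
# A minimal usage sketch (assumes network access to fetch openai/whisper-base and a
# 16 kHz mono waveform); the tool simply chains encode -> forward -> decode as above.
if __name__ == "__main__":
    import numpy as np

    tool = SpeechToTextTool()
    silence = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    print(tool(silence))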
| 83
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
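
# Note on the pattern above: `import transformers.models.jukebox` stays nearly free,
# because the torch-dependent modules listed in _import_structure are only imported on
# first attribute access (e.g. `JukeboxModel`), while the TYPE_CHECKING branch keeps
# static analyzers and IDEs fully informed about the available names.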
| 59
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"  # silence advisory warnings from transformers
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
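
# This script is written to be launched across processes, e.g. with
#   accelerate launch --num_processes 2 test_metrics.py
# (the filename is illustrative; it depends on where the script is saved). Run under
# plain `python` it still works, degenerating to the single-process code path.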
| 59
| 1
|
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
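
# A minimal usage sketch, assuming Tesseract is installed and a local `document.png`
# exists (both are assumptions); with the default apply_ocr=True the returned
# BatchFeature carries `words` and `boxes` alongside `pixel_values`.
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutLMv3ImageProcessor()
    encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
    print(encoding["pixel_values"].shape, len(encoding["words"][0]))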
| 98
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)
        return records
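
# A minimal sketch of the pipeline in use, assuming a small causal LM checkpoint is
# reachable; `return_full_text=False` maps to ReturnType.NEW_TEXT in _sanitize_parameters.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="sshleifer/tiny-gpt2")
    print(generator("Hello, world", max_new_tokens=5, return_full_text=False))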
| 86
| 0
|
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
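
# Both update() and query() are O(log n) per call thanks to lazy propagation: a pending
# range assignment is parked in `lazy`/`flag` and only pushed down to the children when
# a later operation actually visits that node, so no range is rewritten eagerly.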
| 333
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
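
# A quick sketch of attribute_map in action: `max_position_embeddings` transparently
# resolves to `context_length`, so generic code that reads the former keeps working.
if __name__ == "__main__":
    config = RwkvConfig(context_length=2048)
    print(config.max_position_embeddings)  # 2048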
| 333
| 1
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of `lst`, assuming the values strictly increase and then
    strictly decrease.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
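
# Quick sanity checks for the divide-and-conquer peak finder above, which needs only
# O(log n) comparisons on a list that strictly rises and then falls:
if __name__ == "__main__":
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
    assert peak([1, 10, 9, 8, 7, 6, 5, 4]) == 10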
| 146
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
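
# These helpers back CI-style training tests: RegressionDataset/RegressionModel define a
# deterministic y = a*x + b task, and mocked_dataloaders swaps the real GLUE download for
# two small local CSV files so the tests can run offline and fast.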
| 229
| 0
|
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place with the gnome sort algorithm and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
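
# Gnome sort runs in O(n^2) time in the worst case and O(n) on already-sorted input;
# it sorts in place and is stable, since equal neighbours are never swapped.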
| 304
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
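
# merge_lists simply re-sorts the concatenated values, O((n + m) log(n + m)); for the
# two tuples above it prints:
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10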
| 304
| 1
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
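
# Typical invocations, assuming this script lives at utils/release.py of the repo root
# (the path is an assumption):
#   python utils/release.py                 # minor release: bump versions, clean README links
#   python utils/release.py --patch         # patch release: bump the micro version only
#   python utils/release.py --post_release  # move back to a .dev0 version afterwards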
| 165
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
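
# A minimal sketch: configs are plain containers, so this runs without any model weights.
if __name__ == "__main__":
    config = Data2VecVisionConfig(image_size=384, use_auxiliary_head=False)
    print(config.model_type, config.image_size, config.num_hidden_layers)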
| 103
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : List[str]=30 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=10 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Union[str, Any]=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 30
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"
    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
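# --- Added usage sketch (commented, hedged): the ConvNeXt backbone below is an
# illustrative choice; any `model_type` key present in CONFIG_MAPPING works the
# same way. ---
# config = UperNetConfig()  # falls back to the default ResNet backbone
# config = UperNetConfig(backbone_config={"model_type": "convnext"})
# assert config.to_dict()["backbone_config"]["model_type"] == "convnext"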
| 30
| 1
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
snake_case : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
snake_case : Tuple = "xvjiarui/stable-diffusion-2-inpainting"
snake_case , snake_case : Dict = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case__ , safety_checker=snake_case__ )
snake_case : Tuple = "Face of a yellow cat, high resolution, sitting on a park bench"
snake_case : Optional[Any] = jax.random.PRNGKey(0 )
snake_case : List[str] = 50
snake_case : Tuple = jax.device_count()
snake_case : Optional[Any] = num_samples * [prompt]
snake_case : List[str] = num_samples * [init_image]
snake_case : str = num_samples * [mask_image]
snake_case , snake_case , snake_case : int = pipeline.prepare_inputs(snake_case__ , snake_case__ , snake_case__ )
# shard inputs and rng
snake_case : Tuple = replicate(snake_case__ )
snake_case : Optional[Any] = jax.random.split(snake_case__ , jax.device_count() )
snake_case : Union[str, Any] = shard(snake_case__ )
snake_case : Union[str, Any] = shard(snake_case__ )
snake_case : List[str] = shard(snake_case__ )
snake_case : int = pipeline(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ )
snake_case : List[str] = output.images.reshape(snake_case__ , 5_12 , 5_12 , 3 )
snake_case : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
snake_case : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case : Any = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
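# --- Added note: `replicate`/`shard` copy the params and split the inputs across
# all local devices, and `jit=True` runs the pipeline through `jax.pmap` under the
# hood, which is why the output batch is reshaped by the device count above. ---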
| 59
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 59
| 1
|
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
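# --- Added worked check: for 5x^2 + 6x + 1 = 0 the discriminant is
# 6^2 - 4*5*1 = 16, so the roots are (-6 + 4) / 10 = -0.2 and
# (-6 - 4) / 10 = -1.0, which is exactly what main() prints. ---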
| 3
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self) -> None:
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3
| 1
|
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range assignment."""
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx: int) -> int:
        return idx * 2
    def right(self, idx: int) -> int:
        return idx * 2 + 1
    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # push any pending lazy value down before touching this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True
    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # resolve pending lazy updates before answering the query
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)
    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
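# --- Added note: query(1, 1, size, a, b) returns the maximum over positions
# a..b (1-indexed). For the array above, positions 4..6 hold [7, 3, -5], so the
# first print shows 7; after update(..., 1, 3, 111) the maximum over 1..15
# becomes 111. ---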
| 333
|
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
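# --- Added example: a `step_rules` string like "1:10,0.1:20,0.01" is parsed
# above into {10: 1.0, 20: 0.1} plus a final multiplier of 0.01, i.e. multiply
# the base LR by 1.0 while step < 10, by 0.1 while step < 20, and by 0.01
# afterwards. ---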
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
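# --- Added usage sketch: exercising the `get_scheduler` factory above. The
# dummy parameter stands in for a real model; the step counts are illustrative. ---
if __name__ == "__main__":
    import torch
    dummy_param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.AdamW([dummy_param], lr=5e-5)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())  # the LR multiplier has decayed to ~0 by the last step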
| 333
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
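# --- Added note: the JIT test compiles the model once via `jax.jit` and replays
# the same call under `jax.disable_jit()`; the final loop asserts only shape
# equality, since compiled and eager runs may differ by tiny float error. ---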
| 227
|
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
"""simple docstring"""
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)
    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
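# --- Added hedged example (commented): with the 2x2 key [[2, 5], [1, 6]]
# (det = 7, coprime with 36) the cipher multiplies 2-character blocks by the key
# mod 36, and decrypt() applies the modular inverse key, so a round trip
# recovers the message up to end-of-block padding:
# hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
# assert hc.decrypt(hc.encrypt("HELLO")).startswith("HELLO")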
| 227
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_UpperCamelCase : Optional[int] = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def __UpperCAmelCase ( ) -> List[str]:
UpperCAmelCase_ : Optional[int] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase_ : Union[str, Any] = get_sagemaker_input()
else:
UpperCAmelCase_ : Optional[int] = get_cluster_input()
return config
def __UpperCAmelCase ( A : Optional[int]=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase_ : Tuple = subparsers.add_parser('''config''' , description=A )
else:
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser('''Accelerate config command''' , description=A )
parser.add_argument(
'''--config_file''' , default=A , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=A )
return parser
def __UpperCAmelCase ( A : Tuple ) -> List[Any]:
UpperCAmelCase_ : int = get_user_input()
if args.config_file is not None:
UpperCAmelCase_ : Optional[int] = args.config_file
else:
if not os.path.isdir(A ):
os.makedirs(A )
UpperCAmelCase_ : Optional[int] = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(A )
else:
config.to_yaml_file(A )
print(F"accelerate configuration saved at {config_file}" )
def __UpperCAmelCase ( ) -> Dict:
UpperCAmelCase_ : List[Any] = config_command_parser()
UpperCAmelCase_ : List[str] = parser.parse_args()
config_command(A )
if __name__ == "__main__":
main()
| 304
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A ( cls : int ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
UpperCAmelCase_ : str = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
UpperCAmelCase_ : int = False
return models_are_equal
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
| 304
| 1
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16,
        task=args.task, prefix=args.prefix, **parsed_args, )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 127
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
    def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
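# --- Added usage sketch (commented, hedged): a typical fine-tuning setup with
# 10% warmup; the weight-decay exclusions above then apply automatically. ---
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
# )
# model.compile(optimizer=optimizer, ...)  # `model` is a hypothetical Keras model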
class AdamWeightDecay(Adam):
"""simple docstring"""
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: List[str] = {"WarmUp": WarmUp}
return super(lowerCAmelCase__ , cls).from_config(lowerCAmelCase__ , custom_objects=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int]):
super(lowerCAmelCase__ , self)._prepare_local(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: str = self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=None , **lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = list(zip(*lowerCAmelCase__))
return super(lowerCAmelCase__ , self).apply_gradients(zip(lowerCAmelCase__ , lowerCAmelCase__) , name=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
SCREAMING_SNAKE_CASE_: Dict = apply_state or {}
SCREAMING_SNAKE_CASE_: List[str] = apply_state.get((var_device, var_dtype))
if coefficients is None:
SCREAMING_SNAKE_CASE_: Optional[int] = self._fallback_apply_state(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple=None):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self._decay_weights_op(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
with tf.control_dependencies([decay]):
return super(lowerCAmelCase__ , self)._resource_apply_dense(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict=None):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = self._decay_weights_op(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
with tf.control_dependencies([decay]):
return super(lowerCAmelCase__ , self)._resource_apply_sparse(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[str] = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate})
return config
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCAmelCase__ , lowerCAmelCase__) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCAmelCase__ , lowerCAmelCase__) is not None:
return False
return True
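# Note on the matching logic above (illustrative sketch, not part of the
# original file): parameter names are tested with `re.search`, so the patterns
# match anywhere in the variable name. With the exclusion list used by
# `create_optimizer`, biases and layer-norm weights are never decayed:
#
#     opt = AdamWeightDecay(learning_rate=1e-3, weight_decay_rate=0.01,
#                           exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
#     opt._do_use_weight_decay("encoder/dense/kernel:0")  # True  -> decay applied
#     opt._do_use_weight_decay("encoder/dense/bias:0")    # False -> skipped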
class GradientAccumulator(object):
    """
    Gradient accumulation utility. When used with a distribution strategy, the
    accumulator should be called in a replica context: gradients are accumulated
    locally on each replica without synchronization. Users should then read
    `.gradients`, scale them if required, and pass the result to `apply_gradients`.
    """

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
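# Usage sketch for GradientAccumulator (illustrative; `model`, `optimizer`,
# `compute_loss` and `micro_batches` are hypothetical names): accumulate
# gradients over several micro-batches, then apply a single optimizer update,
# which simulates a larger effective batch size than fits in memory at once.
#
#     accumulator = GradientAccumulator()
#     for micro_batch in micro_batches:
#         with tf.GradientTape() as tape:
#             loss = compute_loss(model, micro_batch)
#         accumulator(tape.gradient(loss, model.trainable_variables))
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()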