| code (stringlengths 86-54.5k) | code_codestyle (int64 0-371) | style_context (stringlengths 87-49.2k) | style_context_codestyle (int64 0-349) | label (int64 0-1) |
|---|---|---|---|---|
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
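# Hypothetical consumer of the fixtures above (illustrative only, not part of
# the original file): the directory fixture yields a path that contains
# "<script_name>.py", ready to hand to `datasets.load_dataset`.
# def test_script_dir_contains_loading_script(dataset_loading_script_dir, dataset_loading_script_name):
#     import os
#     script_path = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
#     assert os.path.isfile(script_path)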
| 146
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 207
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 362
|
def solution() -> str:
    """
    Returns the last ten digits of the series 1^1 + 2^2 + 3^3 + ... + 1000^1000.
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
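# A minimal alternative sketch (not part of the original file): since only the
# last ten digits matter, the series can also be accumulated modulo 10**10 with
# three-argument pow(), which keeps every intermediate value small.
def solution_mod_10_digits() -> str:
    modulus = 10**10
    total = 0
    for i in range(1, 1001):
        total = (total + pow(i, i, modulus)) % modulus
    return str(total).zfill(10)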
| 284
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 channel-first arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 207
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 207
| 1
|
def check_cycle(graph: dict) -> bool:
    """
    Returns True if the directed graph contains a cycle, else False.
    """
    # Keep track of visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
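# Quick usage sketch (illustrative graphs, not part of the original file):
# adjacency lists map each vertex to the vertices it points at.
# print(check_cycle({0: [1], 1: [2], 2: [0]}))    # True, 0 -> 1 -> 2 -> 0
# print(check_cycle({0: [1, 2], 1: [2], 2: []}))  # False, no back edge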
| 213
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 213
| 1
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 72
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284
| 0
|
"""simple docstring"""
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : str = " " ) -> list:
"""simple docstring"""
snake_case = []
snake_case = 0
for index, char in enumerate(_UpperCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
snake_case = index + 1
elif index + 1 == len(_UpperCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
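# Usage sketch (illustrative values, not part of the original file):
# split("apple#banana#cherry#orange", separator="#")
# -> ['apple', 'banana', 'cherry', 'orange']
# split("Hello there") -> ['Hello', 'there']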
| 149
|
"""simple docstring"""
import os
def lowerCAmelCase__ ( _UpperCamelCase : str = "matrix.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(_UpperCamelCase ) , _UpperCamelCase ) ) as in_file:
snake_case = in_file.read()
snake_case = [[int(_UpperCamelCase ) for cell in row.split(',' )] for row in data.strip().splitlines()]
snake_case = [[0 for cell in row] for row in grid]
snake_case = len(grid[0] )
snake_case = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]
snake_case = grid[0][0]
for i in range(1 , _UpperCamelCase ):
snake_case = grid[0][i] + dp[0][i - 1]
for i in range(1 , _UpperCamelCase ):
snake_case = grid[i][0] + dp[i - 1][0]
for i in range(1 , _UpperCamelCase ):
for j in range(1 , _UpperCamelCase ):
snake_case = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""")
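# Worked micro-example of the DP recurrence above (hypothetical 3x3 grid, not
# the Project Euler input): dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]).
# grid = [[1, 3, 1],        dp = [[1, 4, 5],
#         [1, 5, 1],              [2, 7, 6],
#         [4, 2, 1]]              [6, 8, 7]]
# dp[-1][-1] == 7, the minimal right/down path sum (1 -> 3 -> 1 -> 1 -> 1).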
| 149
| 1
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
        <?xml version="1.0" encoding="UTF-8" ?>
        <tmx version="1.4">
          <header segtype="sentence" srclang="ca" />
          <body>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
              <tuv xml:lang="en"><seg>Content 1</seg></tuv>
            </tu>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
              <tuv xml:lang="en"><seg>Content 2</seg></tuv>
            </tu>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
              <tuv xml:lang="en"><seg>Content 3</seg></tuv>
            </tu>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
              <tuv xml:lang="en"><seg>Content 4</seg></tuv>
            </tu>
            <tu>
              <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
              <tuv xml:lang="en"><seg>Content 5</seg></tuv>
            </tu>
          </body>
        </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_file(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
| 89
|
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """
    Returns the first partition count at which the proportion of perfect
    partitions drops below max_proportion.
    """
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89
| 1
|
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """
    Checks whether n is a 9-digit 1 to 9 pandigital number.
    """
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
if __name__ == "__main__":
print(f'{solution() = }')
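# Why 100002 and 1002003 (explanatory note, not part of the original file): for
# a 4-digit base b whose double 2*b still has 5 digits, 100002 * b equals the
# concatenated product of b with (1, 2); 1002003 * b does the same for a
# 3-digit b with (1, 2, 3). Quick check of the identity:
# b = 9327
# assert 100002 * b == int(str(b) + str(2 * b))  # 932718654, which is pandigital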
| 371
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
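
# Hedged usage sketch (the checkpoint name is illustrative of the RAG family,
# and generated_ids would come from a RAG model's generate()):
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer(["who wrote the origin of species"], return_tensors="pt")
#     answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)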
| 131
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
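
# With the lazy module in place, `from transformers.models.vivit import VivitModel`
# (module path assumed from the relative imports above) resolves symbols from
# _import_structure on first access, so torch and vision stay unloaded until needed.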
| 216
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences with nltk and rejoin them with newlines."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 284
| 0
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = parent
__lowercase : List[str] = out_indices if out_indices is not None else [4]
__lowercase : Optional[int] = stage_names
__lowercase : Any = out_features
__lowercase : Optional[Any] = backbone
__lowercase : Optional[Any] = batch_size
__lowercase : Union[str, Any] = image_size
__lowercase : List[str] = num_channels
__lowercase : str = use_pretrained_backbone
__lowercase : str = is_training
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : str = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Optional[Any] = model(__a )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase : str = config_and_inputs
__lowercase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
_A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
_A : List[Any] = False
_A : List[str] = False
_A : Any = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = TimmBackboneModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = """resnet18"""
__lowercase : Optional[int] = """microsoft/resnet-18"""
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
__lowercase : Dict = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
__lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : List[str] = [*signature.parameters.keys()]
__lowercase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Optional[Any] = True
__lowercase : Union[str, Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowercase : Union[str, Any] = self.all_model_classes[0]
__lowercase : List[Any] = model_class(__a )
model.to(__a )
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
__lowercase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowercase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
__lowercase : int = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowercase : Any = copy.deepcopy(__a )
__lowercase : Dict = None
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowercase : List[str] = copy.deepcopy(__a )
__lowercase : Optional[Any] = False
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(**__a )
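
# Hedged sketch of the backbone API exercised by these tests (mirrors the
# checkpoints used above; tensor shapes are illustrative):
#     backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
#     feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage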
| 306
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case_ ( lowerCAmelCase_ : Tuple ):
if isinstance(lowerCAmelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class lowerCAmelCase :
'''simple docstring'''
def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = np.abs((a - b) ).max()
self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = after_output[0]
__lowercase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1E-3 )
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
__lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Union[str, Any] = model(
input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
__lowercase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
__lowercase : List[str] = to_atuple(vision_model.config.patch_size )
__lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase : Dict = output.text_model_output.attentions
self.assertEqual(len(__a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pt_model.to(__a )
pt_model.eval()
# prepare inputs
__lowercase : Union[str, Any] = inputs_dict
__lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
__lowercase : Tuple = fx_model(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__a )
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
__lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__a )
__lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
pt_model_loaded.to(__a )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = VisionTextDualEncoderModel(__a )
__lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
__lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
__lowercase : Any = fx_state
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
__lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__a )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**__a )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__a )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
__lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
__lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
__lowercase : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(__a , __a , __a )
self.check_equivalence_flax_to_pt(__a , __a , __a )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
__lowercase : Dict = model_a(**__a )
__lowercase : Any = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__a )
__lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Optional[int] = model_a(**__a )
__lowercase : Tuple = after_outputs[0]
__lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : int = 13
__lowercase : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : Tuple = random_attention_mask([batch_size, 4] )
__lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxViTModel(__a )
__lowercase : List[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = FlaxViTModelTester(self )
__lowercase : str = FlaxBertModelTester(self )
__lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
__lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Optional[int] = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : Tuple = 13
__lowercase : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : List[Any] = random_attention_mask([batch_size, 4] )
__lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = FlaxCLIPVisionModel(__a )
__lowercase : Optional[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = FlaxCLIPVisionModelTester(self )
__lowercase : Optional[Any] = FlaxBertModelTester(self )
__lowercase : Any = clip_model_tester.prepare_config_and_inputs()
__lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Dict = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
__lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase : Tuple = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" )
__lowercase : Optional[int] = model(**__a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
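
# Hedged sketch of turning the verified logits into caption probabilities
# (softmax usage is an assumption about the caller, not part of this test):
#     probs = jax.nn.softmax(outputs.logits_per_image, axis=-1)
#     # probs[i, j] scores candidate caption j against image i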
| 306
| 1
|
"""simple docstring"""
from math import pow, sqrt
def lowercase__( *__SCREAMING_SNAKE_CASE : float ):
lowercase_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
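
# Worked example (illustrative molar masses in g/mol): by Graham's law hydrogen
# effuses sqrt(31.999 / 2.016) ~ 3.984 times faster than oxygen.
if __name__ == "__main__":
    print(effusion_ratio(2.016, 31.999))  # ~3.984033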
| 213
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase :
@staticmethod
def _UpperCAmelCase ( *__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
lowercase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : int = pipeline(
'zero-shot-object-detection' ,model='hf-internal-testing/tiny-random-owlvit-object-detection' )
lowercase_ : Optional[int] = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Tuple = object_detector(examples[0] ,threshold=0.0 )
lowercase_ : Any = len(__UpperCamelCase )
self.assertGreater(__UpperCamelCase ,0 )
self.assertEqual(
__UpperCamelCase ,[
{
'score': ANY(__UpperCamelCase ),
'label': ANY(__UpperCamelCase ),
'box': {'xmin': ANY(__UpperCamelCase ), 'ymin': ANY(__UpperCamelCase ), 'xmax': ANY(__UpperCamelCase ), 'ymax': ANY(__UpperCamelCase )},
}
for i in range(__UpperCamelCase )
] ,)
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = pipeline(
'zero-shot-object-detection' ,model='hf-internal-testing/tiny-random-owlvit-object-detection' )
lowercase_ : Optional[int] = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' ,candidate_labels=['cat', 'remote', 'couch'] ,threshold=0.64 ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] ,)
lowercase_ : Union[str, Any] = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] ,threshold=0.64 ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] ,)
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Any = pipeline('zero-shot-object-detection' )
lowercase_ : List[str] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] ,)
lowercase_ : Union[str, Any] = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] ,)
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = 0.2
lowercase_ : Any = pipeline('zero-shot-object-detection' )
lowercase_ : str = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,threshold=__UpperCamelCase ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] ,)
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = 2
lowercase_ : Optional[Any] = pipeline('zero-shot-object-detection' )
lowercase_ : List[Any] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,top_k=__UpperCamelCase ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] ,)
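
# Hedged sketch of the call pattern these tests verify: each invocation returns
# a list of {"score", "label", "box"} dicts sorted by descending score, e.g.
#     detector = pipeline("zero-shot-object-detection")
#     detector(image_url, candidate_labels=["cat", "remote"], threshold=0.2, top_k=2)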
| 213
| 1
|
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode a bytes-like object to base64."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode a base64-encoded string or bytes-like object."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
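    # Round-trip sanity sketch beyond the doctests: encode then decode is identity.
    assert base64_encode(b"Hello") == b"SGVsbG8="
    assert base64_decode(base64_encode(b"Hello")) == b"Hello"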
| 331
|
import os


def solution(filename: str = "input.txt") -> int:
    """Minimal path sum from the left column to the right column, moving up,
    down and right through the matrix read from `filename`."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # First assume every path enters column j from the left...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ...then relax downwards and upwards to allow vertical detours.
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 331
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
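
# Hedged sketch: the hard-coded ids above should roughly match what the
# checkpoint's own tokenizer produces for "My dog is cute" (an assumption,
# not verified by this test):
#     tok = AutoTokenizer.from_pretrained("jplu/tf-xlm-roberta-base")
#     tok("My dog is cute", return_tensors="tf")["input_ids"]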
| 149
|
def actual_power(a: int, b: int):
    """Divide-and-conquer exponentiation: returns a ** b for integer a, b.

    The int(b / 2) truncation toward zero also makes this behave for negative b.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Return a ** b, supporting negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
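    # More examples (a sketch): 2 ** 3 and 2 ** -3.
    print(power(2, 3))   # 8
    print(power(2, -3))  # 0.125
    # Note: actual_power recomputes the half-power twice per call, so this runs
    # in O(b) multiplications; caching that half result would give O(log b).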
| 149
| 1
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase = logging.get_logger(__name__)
# General docstring
UpperCAmelCase = '''RegNetConfig'''
# Base docstring
UpperCAmelCase = '''facebook/regnet-y-040'''
UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
UpperCAmelCase = '''facebook/regnet-y-040'''
UpperCAmelCase = '''tabby, tabby cat'''
UpperCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a ( tf.keras.layers.Layer ):
def __init__( self :Dict ,__lowercase :int ,__lowercase :int = 3 ,__lowercase :int = 1 ,__lowercase :int = 1 ,__lowercase :Optional[str] = "relu" ,**__lowercase :Tuple ,):
super().__init__(**__lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
snake_case__ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
snake_case__ : Optional[int] = tf.keras.layers.ConvaD(
filters=__lowercase ,kernel_size=__lowercase ,strides=__lowercase ,padding='''VALID''' ,groups=__lowercase ,use_bias=__lowercase ,name='''convolution''' ,)
snake_case__ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' )
snake_case__ : Dict = ACTaFN[activation] if activation is not None else tf.identity
def __lowerCamelCase ( self :Dict ,__lowercase :Optional[Any] ):
snake_case__ : Optional[Any] = self.convolution(self.padding(__lowercase ) )
snake_case__ : Tuple = self.normalization(__lowercase )
snake_case__ : List[Any] = self.activation(__lowercase )
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self :int ,__lowercase :RegNetConfig ,**__lowercase :Optional[Any] ):
super().__init__(**__lowercase )
snake_case__ : Dict = config.num_channels
snake_case__ : Union[str, Any] = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,)
def __lowerCamelCase ( self :Tuple ,__lowercase :Any ):
snake_case__ : List[str] = shape_list(__lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
snake_case__ : int = tf.transpose(__lowercase ,perm=(0, 2, 3, 1) )
snake_case__ : Optional[int] = self.embedder(__lowercase )
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self :Any ,__lowercase :int ,__lowercase :int = 2 ,**__lowercase :Optional[Any] ):
super().__init__(**__lowercase )
snake_case__ : List[str] = tf.keras.layers.ConvaD(
filters=__lowercase ,kernel_size=1 ,strides=__lowercase ,use_bias=__lowercase ,name='''convolution''' )
snake_case__ : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :tf.Tensor ,__lowercase :bool = False ):
return self.normalization(self.convolution(__lowercase ) ,training=__lowercase )
class a ( tf.keras.layers.Layer ):
def __init__( self :Any ,__lowercase :int ,__lowercase :int ,**__lowercase :int ):
super().__init__(**__lowercase )
snake_case__ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase ,name='''pooler''' )
snake_case__ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=__lowercase ,kernel_size=1 ,activation='''relu''' ,name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=__lowercase ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2''' ),
]
def __lowerCamelCase ( self :List[Any] ,__lowercase :Optional[int] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
snake_case__ : Dict = self.pooler(__lowercase )
for layer_module in self.attention:
snake_case__ : int = layer_module(__lowercase )
snake_case__ : Union[str, Any] = hidden_state * pooled
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self :List[Any] ,__lowercase :RegNetConfig ,__lowercase :int ,__lowercase :int ,__lowercase :int = 1 ,**__lowercase :int ):
super().__init__(**__lowercase )
snake_case__ : int = in_channels != out_channels or stride != 1
snake_case__ : Optional[Any] = max(1 ,out_channels // config.groups_width )
snake_case__ : Union[str, Any] = (
TFRegNetShortCut(__lowercase ,stride=__lowercase ,name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
snake_case__ : int = [
TFRegNetConvLayer(__lowercase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ),
TFRegNetConvLayer(
__lowercase ,stride=__lowercase ,groups=__lowercase ,activation=config.hidden_act ,name='''layer.1''' ),
TFRegNetConvLayer(__lowercase ,kernel_size=1 ,activation=__lowercase ,name='''layer.2''' ),
]
snake_case__ : int = ACTaFN[config.hidden_act]
def __lowerCamelCase ( self :List[str] ,__lowercase :Any ):
snake_case__ : Dict = hidden_state
for layer_module in self.layers:
snake_case__ : Union[str, Any] = layer_module(__lowercase )
snake_case__ : Union[str, Any] = self.shortcut(__lowercase )
hidden_state += residual
snake_case__ : str = self.activation(__lowercase )
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self :List[Any] ,__lowercase :RegNetConfig ,__lowercase :int ,__lowercase :int ,__lowercase :int = 1 ,**__lowercase :List[str] ):
super().__init__(**__lowercase )
snake_case__ : List[str] = in_channels != out_channels or stride != 1
snake_case__ : int = max(1 ,out_channels // config.groups_width )
snake_case__ : Any = (
TFRegNetShortCut(__lowercase ,stride=__lowercase ,name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' )
)
snake_case__ : int = [
TFRegNetConvLayer(__lowercase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ),
TFRegNetConvLayer(
__lowercase ,stride=__lowercase ,groups=__lowercase ,activation=config.hidden_act ,name='''layer.1''' ),
TFRegNetSELayer(__lowercase ,reduced_channels=int(round(in_channels / 4 ) ) ,name='''layer.2''' ),
TFRegNetConvLayer(__lowercase ,kernel_size=1 ,activation=__lowercase ,name='''layer.3''' ),
]
snake_case__ : Optional[Any] = ACTaFN[config.hidden_act]
def __lowerCamelCase ( self :int ,__lowercase :List[Any] ):
snake_case__ : Optional[int] = hidden_state
for layer_module in self.layers:
snake_case__ : str = layer_module(__lowercase )
snake_case__ : Optional[Any] = self.shortcut(__lowercase )
hidden_state += residual
snake_case__ : Union[str, Any] = self.activation(__lowercase )
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self :Union[str, Any] ,__lowercase :RegNetConfig ,__lowercase :int ,__lowercase :int ,__lowercase :int = 2 ,__lowercase :int = 2 ,**__lowercase :Any ):
super().__init__(**__lowercase )
snake_case__ : Optional[int] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
snake_case__ : int = [
# downsampling is done in the first layer with stride of 2
layer(__lowercase ,__lowercase ,__lowercase ,stride=__lowercase ,name='''layers.0''' ),
*[layer(__lowercase ,__lowercase ,__lowercase ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __lowerCamelCase ( self :str ,__lowercase :Dict ):
for layer_module in self.layers:
snake_case__ : List[str] = layer_module(__lowercase )
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self :Optional[Any] ,__lowercase :RegNetConfig ,**__lowercase :List[Any] ):
super().__init__(**__lowercase )
snake_case__ : Optional[int] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__lowercase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,) )
snake_case__ : Dict = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowercase ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__lowercase ,__lowercase ,__lowercase ,depth=__lowercase ,name=F"""stages.{i+1}""" ) )
def __lowerCamelCase ( self :str ,__lowercase :tf.Tensor ,__lowercase :bool = False ,__lowercase :bool = True ):
snake_case__ : Optional[int] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
snake_case__ : List[str] = hidden_states + (hidden_state,)
snake_case__ : Optional[Any] = stage_module(__lowercase )
if output_hidden_states:
snake_case__ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowercase ,hidden_states=__lowercase )
@keras_serializable
class a ( tf.keras.layers.Layer ):
__lowerCAmelCase : Dict = RegNetConfig
def __init__( self :List[Any] ,__lowercase :Optional[int] ,**__lowercase :Dict ):
super().__init__(**__lowercase )
snake_case__ : Optional[int] = config
snake_case__ : List[str] = TFRegNetEmbeddings(__lowercase ,name='''embedder''' )
snake_case__ : Dict = TFRegNetEncoder(__lowercase ,name='''encoder''' )
snake_case__ : Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase ,name='''pooler''' )
@unpack_inputs
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :tf.Tensor ,__lowercase :Optional[bool] = None ,__lowercase :Optional[bool] = None ,__lowercase :bool = False ,):
snake_case__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : str = self.embedder(__lowercase ,training=__lowercase )
snake_case__ : Tuple = self.encoder(
__lowercase ,output_hidden_states=__lowercase ,return_dict=__lowercase ,training=__lowercase )
snake_case__ : int = encoder_outputs[0]
snake_case__ : Dict = self.pooler(__lowercase )
# Change to NCHW output format have uniformity in the modules
snake_case__ : Optional[int] = tf.transpose(__lowercase ,perm=(0, 3, 1, 2) )
snake_case__ : Optional[Any] = tf.transpose(__lowercase ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
snake_case__ : Optional[int] = tuple([tf.transpose(__lowercase ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowercase ,pooler_output=__lowercase ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class a ( __lowerCamelCase ):
__lowerCAmelCase : Optional[Any] = RegNetConfig
__lowerCAmelCase : List[str] = """regnet"""
__lowerCAmelCase : Any = """pixel_values"""
@property
def __lowerCamelCase ( self :Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) ,dtype=tf.floataa )}
UpperCAmelCase = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCAmelCase = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , __lowerCamelCase , )
class a ( __lowerCamelCase ):
def __init__( self :Any ,__lowercase :RegNetConfig ,*__lowercase :Dict ,**__lowercase :Union[str, Any] ):
super().__init__(__lowercase ,*__lowercase ,**__lowercase )
snake_case__ : Tuple = TFRegNetMainLayer(__lowercase ,name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowercase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :tf.Tensor ,__lowercase :Optional[bool] = None ,__lowercase :Optional[bool] = None ,__lowercase :Tuple=False ,):
snake_case__ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : Tuple = self.regnet(
pixel_values=__lowercase ,output_hidden_states=__lowercase ,return_dict=__lowercase ,training=__lowercase ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , __lowerCamelCase , )
class a ( __lowerCamelCase , __lowerCamelCase ):
def __init__( self :str ,__lowercase :RegNetConfig ,*__lowercase :str ,**__lowercase :str ):
super().__init__(__lowercase ,*__lowercase ,**__lowercase )
snake_case__ : List[Any] = config.num_labels
snake_case__ : Any = TFRegNetMainLayer(__lowercase ,name='''regnet''' )
# classification head
snake_case__ : int = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowercase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :tf.Tensor = None ,__lowercase :tf.Tensor = None ,__lowercase :bool = None ,__lowercase :bool = None ,__lowercase :List[Any]=False ,):
snake_case__ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : Optional[int] = self.regnet(
__lowercase ,output_hidden_states=__lowercase ,return_dict=__lowercase ,training=__lowercase )
snake_case__ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
snake_case__ : str = self.classifier[0](__lowercase )
snake_case__ : List[Any] = self.classifier[1](__lowercase )
snake_case__ : List[Any] = None if labels is None else self.hf_compute_loss(labels=__lowercase ,logits=__lowercase )
if not return_dict:
snake_case__ : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__lowercase ,logits=__lowercase ,hidden_states=outputs.hidden_states )
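# Illustrative usage sketch (a sketch only, not part of this module: the public
# transformers names and the checkpoint id below are assumptions relative to the
# obfuscated classes above):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")  # `image` is a PIL image
#   logits = model(**inputs).logits  # shape (batch_size, config.num_labels)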
def _lowerCAmelCase ( __lowerCAmelCase = 50 ) -> int:
"""simple docstring"""
snake_case__ : Optional[int] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
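# Sanity check (hand-countable case, assuming the intended task is Project Euler 114):
# a row of length 7 admits exactly 17 arrangements of red blocks of minimum length 3
# separated by at least one black square, so the function above should return 17 for
# an input of 7.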
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from itertools import permutations
def lowercase ( A_ )-> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
a : Optional[int] = [7, 11, 13, 17]
for i, test in enumerate(A_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase ( A_ = 10 )-> int:
'''simple docstring'''
return sum(
int("".join(map(A_ , A_ ) ) )
for num in permutations(range(A_ ) )
if is_substring_divisible(A_ ) )
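# Example (taken from the classic Project Euler 43 statement, which this snippet is
# assumed to implement): 1406357289 is substring divisible, so
# is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) should return True.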
if __name__ == "__main__":
print(f'''{solution() = }''')
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _a ( _lowercase):
_a : Any = VOCAB_FILES_NAMES
_a : List[str] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _a ( _lowercase):
_a : int = VOCAB_FILES_NAMES
_a : List[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Any = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCamelCase = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
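# Sketch of the call described above (the public DPR names are an assumption relative
# to the obfuscated class below; the strings are illustrative):
#
#   from transformers import DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )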
@add_start_docstrings(_lowercase)
class _a :
def __call__( self : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Union[bool, str] = False , _SCREAMING_SNAKE_CASE : Union[bool, str] = False , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , **_SCREAMING_SNAKE_CASE : str , )-> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
elif titles is None or texts is None:
lowerCAmelCase__ : Tuple = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : int = titles if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [titles]
lowerCAmelCase__ : Dict = texts if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [texts]
lowerCAmelCase__ : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = questions if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F'There should be as many titles as texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.' )
lowerCAmelCase__ : Union[str, Any] = super().__call__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
lowerCAmelCase__ : str = super().__call__(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
lowerCAmelCase__ : Dict = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
lowerCAmelCase__ : Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase__ : Tuple = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : BatchEncoding , _SCREAMING_SNAKE_CASE : DPRReaderOutput , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : int = 64 , _SCREAMING_SNAKE_CASE : int = 4 , )-> List[DPRSpanPrediction]:
lowerCAmelCase__ : Optional[int] = reader_input['''input_ids''']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = reader_output[:3]
lowerCAmelCase__ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = sorted(range(_SCREAMING_SNAKE_CASE ) , reverse=_SCREAMING_SNAKE_CASE , key=relevance_logits.__getitem__ )
lowerCAmelCase__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowerCAmelCase__ : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase__ : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase__ : Any = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase__ : Optional[int] = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_SCREAMING_SNAKE_CASE , top_spans=_SCREAMING_SNAKE_CASE , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_SCREAMING_SNAKE_CASE , start_index=_SCREAMING_SNAKE_CASE , end_index=_SCREAMING_SNAKE_CASE , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , )-> List[DPRSpanPrediction]:
lowerCAmelCase__ : Union[str, Any] = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCAmelCase__ : List[Any] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] , reverse=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
lowerCAmelCase__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
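# Span-selection walk-through (toy logits assumed for illustration): with
# start_logits = [0.1, 2.0, 0.3], end_logits = [0.2, 1.5, 3.0] and max_answer_length = 2,
# the candidate span (1, 2) scores 2.0 + 3.0 = 5.0 and is kept first; the contained,
# lower-scoring candidates (1, 1) and (2, 2) are then skipped by the interval check above.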
@add_end_docstrings(_lowercase)
class _a ( _lowercase , _lowercase):
_a : List[str] = VOCAB_FILES_NAMES
_a : str = READER_PRETRAINED_VOCAB_FILES_MAP
_a : Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
_a : Optional[int] = ['''input_ids''', '''attention_mask''']
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def snake_case_ ( A_ : str, A_ : str ):
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
_lowerCamelCase : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(A_ )
_lowerCamelCase : List[str] = XLMProphetNetForConditionalGeneration.from_pretrained(
A_, output_loading_info=A_ )
else:
_lowerCamelCase : str = ProphetNetForConditionalGenerationOld.from_pretrained(A_ )
_lowerCamelCase : Any = ProphetNetForConditionalGeneration.from_pretrained(
A_, output_loading_info=A_ )
_lowerCamelCase : Optional[Any] = ['''key_proj''', '''value_proj''', '''query_proj''']
_lowerCamelCase : List[Any] = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
_lowerCamelCase : Union[str, Any] = key.split('''.''' )
if attributes[0] == "lm_head":
_lowerCamelCase : str = prophet
_lowerCamelCase : List[Any] = prophet_old
else:
_lowerCamelCase : Optional[int] = prophet.prophetnet
_lowerCamelCase : Optional[Any] = prophet_old.model
_lowerCamelCase : Any = False
for attribute in attributes:
if attribute in mapping:
_lowerCamelCase : Optional[int] = mapping[attribute]
if not hasattr(A_, A_ ) and len(A_ ) > 0:
_lowerCamelCase : int = attribute
elif hasattr(A_, A_ ):
_lowerCamelCase : int = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_lowerCamelCase : Optional[int] = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_lowerCamelCase : List[str] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_lowerCamelCase : int = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_lowerCamelCase : Union[str, Any] = True
break
elif attribute in special_keys and hasattr(A_, '''in_proj_weight''' ):
_lowerCamelCase : Tuple = old_model.in_proj_weight.shape[0] // 3
_lowerCamelCase : List[Any] = getattr(A_, A_ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_lowerCamelCase : Optional[Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_lowerCamelCase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_lowerCamelCase : int = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_lowerCamelCase : str = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_lowerCamelCase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_lowerCamelCase : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_lowerCamelCase : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_lowerCamelCase : List[str] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_lowerCamelCase : Optional[Any] = True
break
if attribute.isdigit():
_lowerCamelCase : Optional[int] = model[int(A_ )]
_lowerCamelCase : List[Any] = old_model[int(A_ )]
else:
_lowerCamelCase : List[str] = getattr(A_, A_ )
if old_attribute == "":
_lowerCamelCase : str = old_model
else:
if not hasattr(A_, A_ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_lowerCamelCase : Optional[int] = getattr(A_, A_ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
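# Example invocation (script name and paths are placeholders for illustration):
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path ./prophetnet_converted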
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase__ = logging.getLogger(__name__)
class __snake_case ( _lowercase):
def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=None ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
_lowerCamelCase : Dict = None
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int ):
"""simple docstring"""
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
_lowerCamelCase : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_lowerCamelCase : Dict = str(distributed_port + 1 )
_lowerCamelCase : str = dist.new_group(ranks=__lowerCAmelCase , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple=torch.floataa ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = torch.empty(__lowerCAmelCase , dtype=__lowerCAmelCase )
dist.scatter(__lowerCAmelCase , src=0 , scatter_list=__lowerCAmelCase , group=self.process_group )
return target_tensor
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[str] = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_lowerCamelCase : str = next((addr for addr in addrs if addr.startswith('''e''' )) , None )
return ifname
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : int ):
"""simple docstring"""
if not dist.is_initialized():
_lowerCamelCase , _lowerCamelCase : Any = self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase )
# distributed training
_lowerCamelCase : Dict = dist.get_world_size(group=self.process_group )
# gather logic
_lowerCamelCase : str = None
if self._is_main():
_lowerCamelCase : List[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCAmelCase )]
dist.gather(torch.tensor(__lowerCAmelCase ) , dst=0 , gather_list=__lowerCAmelCase , group=self.process_group )
# scatter logic
_lowerCamelCase : int = question_hidden_states.shape[0]
_lowerCamelCase : str = []
_lowerCamelCase : Optional[int] = []
if self._is_main():
assert len(__lowerCAmelCase ) == world_size
_lowerCamelCase , _lowerCamelCase : Tuple = self._main_retrieve(torch.cat(__lowerCAmelCase ).numpy() , __lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : List[str] = torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : int = self._scattered(__lowerCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
_lowerCamelCase : str = self._scattered(__lowerCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCAmelCase )
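# Communication pattern implemented by the method above (summary comments, no new
# behavior):
#   1. every worker ships its question_hidden_states to rank 0 via dist.gather;
#   2. rank 0 runs the CPU-heavy index search once over the concatenated batch;
#   3. rank 0 chunks the doc ids and doc embeddings back per worker and hands each
#      worker its slice via dist.scatter on the dedicated gloo process group.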
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__="attention" ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
_SCREAMING_SNAKE_CASE = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
_SCREAMING_SNAKE_CASE = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
_SCREAMING_SNAKE_CASE = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
_SCREAMING_SNAKE_CASE = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
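# Shape sketch for the lookup above (layout assumed from the T5X checkpoint format):
# a key kernel stored as (d_model, num_layers, num_heads, d_kv) is sliced at layer i
# and flattened to (d_model, num_heads * d_kv), i.e. the 2-D matrix a torch nn.Linear
# expects once the caller transposes it below.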
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=False ) -> Optional[Any]:
"""simple docstring"""
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
_SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
_SCREAMING_SNAKE_CASE = (wi_a, wi_a)
else:
_SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
_SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def __lowerCamelCase ( snake_case__ ,*, snake_case__ ,snake_case__ ,snake_case__ = False ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = traverse_util.flatten_dict(variables["""target"""] )
_SCREAMING_SNAKE_CASE = {"""/""".join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_SCREAMING_SNAKE_CASE = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" ,snake_case__ )
_SCREAMING_SNAKE_CASE = collections.OrderedDict()
# Shared embeddings.
_SCREAMING_SNAKE_CASE = old["""token_embedder/embedding"""]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""encoder""" ,"""pre_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(snake_case__ ,snake_case__ ,"""encoder""" ,"""attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 1 (MLP).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""encoder""" ,"""pre_mlp_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_mlp_lookup(snake_case__ ,snake_case__ ,"""encoder""" ,snake_case__ )
_SCREAMING_SNAKE_CASE = layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = wi[0].T
_SCREAMING_SNAKE_CASE = wi[1].T
else:
_SCREAMING_SNAKE_CASE = wi.T
_SCREAMING_SNAKE_CASE = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(
snake_case__ ,snake_case__ ,"""encoder""" ).T
_SCREAMING_SNAKE_CASE = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
_SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(
snake_case__ ,0 ,"""encoder""" ).T
_SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(
snake_case__ ,0 ,"""decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""pre_self_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""self_attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 1 (Cross Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""pre_cross_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""encoder_decoder_attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 2 (MLP).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""pre_mlp_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_mlp_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,snake_case__ )
_SCREAMING_SNAKE_CASE = layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = wi[0].T
_SCREAMING_SNAKE_CASE = wi[1].T
else:
_SCREAMING_SNAKE_CASE = wi.T
_SCREAMING_SNAKE_CASE = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(snake_case__ ,snake_case__ ,"""decoder""" ).T
_SCREAMING_SNAKE_CASE = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_SCREAMING_SNAKE_CASE = old["""decoder/logits_dense/kernel"""].T
return new
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
return state_dict
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = checkpoints.load_t5x_checkpoint(snake_case__ )
_SCREAMING_SNAKE_CASE = convert_tax_to_pytorch(
snake_case__ ,num_layers=config.num_layers ,is_encoder_only=snake_case__ ,scalable_attention=snake_case__ )
_SCREAMING_SNAKE_CASE = make_state_dict(snake_case__ ,snake_case__ )
model.load_state_dict(snake_case__ ,strict=snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = False ,snake_case__ = False ,) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MTaConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_SCREAMING_SNAKE_CASE = UMTaEncoderModel(snake_case__ )
else:
_SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("""Done""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __lowerCamelCase ( ) -> tuple[list[int], int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [randint(-10_00 ,10_00 ) for i in range(10 )]
_SCREAMING_SNAKE_CASE = randint(-50_00 ,50_00 )
return (arr, r)
UpperCamelCase = make_dataset()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(snake_case__ ,3 ):
if sum(snake_case__ ) == target:
return tuple(sorted(snake_case__ ) )
return (0, 0, 0)
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_SCREAMING_SNAKE_CASE = len(snake_case__ )
for i in range(n - 1 ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
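# Two-pointer walk-through (toy numbers for illustration): for arr = [1, 2, 3, 4, 5]
# and target = 9, fixing i = 0 (value 1) gives 1 + 2 + 5 = 8 < 9, so `left` moves right;
# then 1 + 3 + 5 == 9 and (1, 3, 5) is returned without scanning all permutations.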
def __lowerCamelCase ( ) -> tuple[float, float]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum1(*dataset)
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum2(*dataset)
"""
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
return (min(snake_case__ ), min(snake_case__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
import itertools
import os
import re
lowerCAmelCase__ = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
lowerCAmelCase__ = re.compile(r'''([a-z\d])([A-Z])''')
lowerCAmelCase__ = re.compile(r'''(?<!_)_(?!_)''')
lowerCAmelCase__ = re.compile(r'''(_{2,})''')
lowerCAmelCase__ = r'''^\w+(\.\w+)*$'''
lowerCAmelCase__ = r'''<>:/\|?*'''
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = _uppercase_uppercase_re.sub(r"\1_\2" , _SCREAMING_SNAKE_CASE )
UpperCamelCase = _lowercase_uppercase_re.sub(r"\1_\2" , _SCREAMING_SNAKE_CASE )
return name.lower()
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = _single_underscore_re.split(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [_multiple_underscores_re.split(_SCREAMING_SNAKE_CASE ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) if n != "" )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if os.path.basename(_SCREAMING_SNAKE_CASE ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if os.path.basename(_SCREAMING_SNAKE_CASE ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , _SCREAMING_SNAKE_CASE ):
raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." )
return F"{filename_prefix_for_name(_SCREAMING_SNAKE_CASE )}-{split}"
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase = filename_prefix_for_split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if filetype_suffix:
prefix += F".{filetype_suffix}"
UpperCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return F"{filepath}*"
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase = filename_prefix_for_split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if shard_lengths:
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(_SCREAMING_SNAKE_CASE )]
if filetype_suffix:
UpperCamelCase = [filename + F".{filetype_suffix}" for filename in filenames]
return filenames
else:
UpperCamelCase = prefix
if filetype_suffix:
filename += F".{filetype_suffix}"
return [filename]
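# Example of the naming scheme above (values chosen for illustration): a dataset named
# "SquadV2" snake-cases to the prefix "squad_v2"; its train split written as 3 arrow
# shards yields
#   ["squad_v2-train-00000-of-00003.arrow", ..., "squad_v2-train-00002-of-00003.arrow"].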
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowerCAmelCase__ = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
lowerCAmelCase__ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = ["input_ids", "attention_mask"]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
def __init__(self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a=None , __a=None , __a=None , __a = None , __a=None , **__a , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , tokenizer_file=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase = 1
UpperCamelCase = len(self.sp_model )
UpperCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a )
}
UpperCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase = src_lang if src_lang is not None else "en_XX"
UpperCamelCase = self.lang_code_to_id[self._src_lang]
UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self ) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__(self , __a ) -> Tuple:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case_ (self ) -> Any:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case_ (self ) -> str:
return self._src_lang
@src_lang.setter
def snake_case_ (self , __a ) -> None:
UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ (self , __a , __a = None , __a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
UpperCamelCase = [1] * len(self.prefix_tokens )
UpperCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def snake_case_ (self , __a , __a = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ (self , __a , __a = None ) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ (self , __a , __a , __a , __a , **__a ) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCamelCase = src_lang
UpperCamelCase = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
UpperCamelCase = self.convert_tokens_to_ids(__a )
UpperCamelCase = tgt_lang_id
return inputs
def snake_case_ (self ) -> List[str]:
UpperCamelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ (self , __a ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def snake_case_ (self , __a ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ (self , __a ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ (self , __a ) -> Optional[int]:
UpperCamelCase = "".join(__a ).replace(__a , " " ).strip()
return out_string
def snake_case_ (self , __a , __a = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def snake_case_ (self , __a , __a = "en_XX" , __a = None , __a = "ro_RO" , **__a , ) -> BatchEncoding:
UpperCamelCase = src_lang
UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def snake_case_ (self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_ (self ) -> Optional[int]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_ (self , __a ) -> None:
UpperCamelCase = self.lang_code_to_id[src_lang]
UpperCamelCase = []
UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
def snake_case_ (self , __a ) -> None:
UpperCamelCase = self.lang_code_to_id[lang]
UpperCamelCase = []
UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
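# Usage sketch (public API names assumed; they mirror the obfuscated class above):
#
#   from transformers import MBartTokenizer
#
#   tok = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids end with </s> followed by the en_XX code, per set_src_lang_special_tokens.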
'''simple docstring'''
lowerCAmelCase :Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def lowerCamelCase ( lowerCAmelCase : bytes ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : Dict = f'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(lowerCAmelCase )
__magic_name__ : Any = ''.join(bin(lowerCAmelCase )[2:].zfill(8 ) for byte in data )
__magic_name__ : Optional[Any] = len(lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
__magic_name__ : Union[str, Any] = b'=' * ((6 - len(lowerCAmelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase ) % 6)
else:
__magic_name__ : Optional[int] = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( lowerCAmelCase : str ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ) and not isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : List[str] = (
'argument should be a bytes-like object or ASCII string, '
f'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowerCAmelCase , lowerCAmelCase ):
try:
__magic_name__ : Dict = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
__magic_name__ : Union[str, Any] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__magic_name__ : str = encoded_data[:-padding]
__magic_name__ : Optional[Any] = ''.join(
bin(B64_CHARSET.index(lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__magic_name__ : Any = ''.join(
bin(B64_CHARSET.index(lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
__magic_name__ : Dict = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCAmelCase ) , 8 )
]
return bytes(lowerCAmelCase )
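# Round-trip check using standard Base64 values: encoding b"Hello World!" with the
# encoder above yields b"SGVsbG8gV29ybGQh", and the decoder recovers b"Hello World!".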
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : torch.FloatTensor
UpperCAmelCase : Optional[torch.FloatTensor] = None
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Optional[int]=0.999 , _snake_case : Any="cosine" , ) -> Optional[int]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_snake_case : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_snake_case : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_A = []
for i in range(_snake_case ):
_A = i / num_diffusion_timesteps
_A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) )
return torch.tensor(_snake_case , dtype=torch.floataa )
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = 1
@register_to_config
def __init__( self : int , _UpperCAmelCase : int = 1_000 , _UpperCAmelCase : float = 0.0001 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : str = "linear" , _UpperCAmelCase : Optional[Union[np.ndarray, List[float]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : int = 0 , _UpperCAmelCase : str = "epsilon" , _UpperCAmelCase : float = 1.0 , **_UpperCAmelCase : Tuple , ):
if kwargs.get('set_alpha_to_one' , _UpperCAmelCase ) is not None:
_A = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
_A = kwargs['set_alpha_to_one']
if trained_betas is not None:
_A = torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_A = torch.linspace(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_A = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_A = betas_for_alpha_bar(_UpperCAmelCase )
else:
raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_A = 1.0 - self.betas
_A = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted DDIM, we look at the next alphas_cumprod.
        # For the final step there is no next alphas_cumprod, and the index would be out of bounds.
        # `set_alpha_to_zero` decides whether we simply set this parameter to zero --
        # in which case self.step() just outputs the predicted noise --
        # or whether we use the final alpha of the "non-previous" one.
_A = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_A = 1.0
# setable values
_A = None
        _A = torch.from_numpy(np.arange(0 , _UpperCAmelCase ).copy().astype(np.int64 ) )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Optional[int] = None ):
return sample
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, torch.device] = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
                F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
_A = num_inference_steps
_A = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_steps is a power of 3
        _A = (np.arange(0 , _UpperCAmelCase ) * step_ratio).round().copy().astype(np.int64 )
_A = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase )
self.timesteps += self.config.steps_offset
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : bool = True , ):
# 1. get previous step value (=t+1)
_A = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_A = self.alphas_cumprod[timestep]
_A = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_A = model_output
elif self.config.prediction_type == "sample":
_A = model_output
_A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_A = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_A = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_A = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_A = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
def __len__( self : Optional[Any] ):
return self.config.num_train_timesteps
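# Minimal usage sketch (a sketch only: it assumes the class above mirrors
# diffusers' DDIMInverseScheduler, and the zero tensor stands in for a UNet call):
#   import torch
#   from diffusers import DDIMInverseScheduler
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64)
#   for t in scheduler.timesteps:
#       noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample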
| 271
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
a = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
a = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
a = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
a = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
a = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
def strabool ( v ) -> bool:
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def _snake_case ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : Optional[Any]=False ) -> List[str]:
'''simple docstring'''
_A = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_A = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_A = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_A = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_A = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_A = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_A = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_A = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_A = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_A = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_A = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_A = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def _snake_case ( _snake_case : List[Any] , _snake_case : int , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : int=None ) -> Optional[int]:
'''simple docstring'''
_A , _A , _A = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_A , _A , _A = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_A = checkpoint[F'''{old_prefix}.norm.weight''']
_A = checkpoint[F'''{old_prefix}.norm.bias''']
_A = weight_q.squeeze(-1 ).squeeze(-1 )
_A = bias_q.squeeze(-1 ).squeeze(-1 )
_A = weight_k.squeeze(-1 ).squeeze(-1 )
_A = bias_k.squeeze(-1 ).squeeze(-1 )
_A = weight_v.squeeze(-1 ).squeeze(-1 )
_A = bias_v.squeeze(-1 ).squeeze(-1 )
_A = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_A = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _snake_case ( _snake_case : str , _snake_case : Any ) -> str:
'''simple docstring'''
_A = torch.load(_snake_case , map_location='cpu' )
_A = {}
_A = checkpoint['time_embed.0.weight']
_A = checkpoint['time_embed.0.bias']
_A = checkpoint['time_embed.2.weight']
_A = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_A = checkpoint['label_emb.weight']
_A = checkpoint['input_blocks.0.0.weight']
_A = checkpoint['input_blocks.0.0.bias']
_A = unet_config['down_block_types']
_A = unet_config['layers_per_block']
_A = unet_config['attention_head_dim']
_A = unet_config['block_out_channels']
_A = 1
_A = channels_list[0]
for i, layer_type in enumerate(_snake_case ):
_A = channels_list[i]
_A = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(_snake_case ):
_A = F'''down_blocks.{i}.resnets.{j}'''
_A = F'''input_blocks.{current_layer}.0'''
_A = True if j == 0 and downsample_block_has_skip else False
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(_snake_case ):
_A = F'''down_blocks.{i}.resnets.{j}'''
_A = F'''input_blocks.{current_layer}.0'''
_A = True if j == 0 and downsample_block_has_skip else False
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
_A = F'''down_blocks.{i}.attentions.{j}'''
_A = F'''input_blocks.{current_layer}.1'''
_A = convert_attention(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
_A = F'''down_blocks.{i}.downsamplers.0'''
_A = F'''input_blocks.{current_layer}.0'''
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
_A = current_channels
# hardcoded the mid-block for now
_A = 'mid_block.resnets.0'
_A = 'middle_block.0'
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
_A = 'mid_block.attentions.0'
_A = 'middle_block.1'
_A = convert_attention(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
_A = 'mid_block.resnets.1'
_A = 'middle_block.2'
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
_A = 0
_A = unet_config['up_block_types']
for i, layer_type in enumerate(_snake_case ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_A = F'''up_blocks.{i}.resnets.{j}'''
_A = F'''output_blocks.{current_layer}.0'''
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
_A = F'''up_blocks.{i}.upsamplers.0'''
_A = F'''output_blocks.{current_layer-1}.1'''
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_A = F'''up_blocks.{i}.resnets.{j}'''
_A = F'''output_blocks.{current_layer}.0'''
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
_A = F'''up_blocks.{i}.attentions.{j}'''
_A = F'''output_blocks.{current_layer}.1'''
_A = convert_attention(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
_A = F'''up_blocks.{i}.upsamplers.0'''
_A = F'''output_blocks.{current_layer-1}.2'''
_A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
_A = checkpoint['out.0.weight']
_A = checkpoint['out.0.bias']
_A = checkpoint['out.2.weight']
_A = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
a = parser.parse_args()
a = strabool(args.class_cond)
a = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a = None
a = con_pt_to_diffuser(args.unet_path, unet_config)
a = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
a = CMStochasticIterativeScheduler(**scheduler_config)
a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
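# Example invocation of this conversion script (file names are illustrative, not
# taken from this repository):
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True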
| 271
| 1
|
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    '''simple docstring'''
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    '''simple docstring'''
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    '''simple docstring'''
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
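# Worked examples for the restored functions above (Project Euler 47 values):
#   solution(2) == 14    # 14 = 2 * 7 and 15 = 3 * 5 each have two distinct prime factors
#   solution(3) == 644   # 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19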
| 105
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ""
    _UpperCamelCase : str = "hf-legacy"  # "hf://" is reserved for hffs
def __init__( self , a__ = None , a__ = None , **a__ , ):
super().__init__(self , **a__ )
_lowerCAmelCase : Any = repo_info
_lowerCAmelCase : Optional[Any] = token
_lowerCAmelCase : Optional[int] = None
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {"""name""": str(a__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , a__ , a__ = "rb" , **a__ , ):
        if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_lowerCAmelCase : Tuple = hf_hub_url(self.repo_info.id , a__ , revision=self.repo_info.sha )
return fsspec.open(
a__ , mode=a__ , headers=get_authentication_headers_for_url(a__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __A ( self , a__ , **a__ ):
self._get_dirs()
_lowerCAmelCase : Union[str, Any] = self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def __A ( self , a__ , a__=False , **a__ ):
self._get_dirs()
_lowerCAmelCase : Any = PurePosixPath(path.strip("""/""" ) )
_lowerCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : Any = PurePosixPath(p.strip("""/""" ) )
_lowerCAmelCase : Optional[int] = p.parent
if root == path:
_lowerCAmelCase : Dict = f
_lowerCAmelCase : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
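# Minimal usage sketch (assumes the class above is datasets' legacy HfFileSystem;
# the repository id below is hypothetical):
#   from huggingface_hub import HfApi
#   repo_info = HfApi().dataset_info("user/my-dataset")
#   fs = HfFileSystem(repo_info=repo_info)
#   fs.ls("")                                 # top-level files and directories
#   with fs.open("data/train.jsonl") as f:    # streams the file via hf_hub_url
#       first_line = f.readline()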
| 44
| 0
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :Tuple = GPTSanJapaneseTokenizer
lowerCAmelCase :List[Any] = False
lowerCAmelCase :List[Any] = {"""do_clean_text""": False, """add_prefix_space""": False}
def snake_case__ ( self):
super().setUp()
# fmt: off
UpperCAmelCase__ : str = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
UpperCAmelCase__ : Union[str, Any] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
UpperCAmelCase__ : Tuple = {"""unk_token""": """<unk>"""}
UpperCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
UpperCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
with open(self.emoji_file , """w""") as emoji_writer:
emoji_writer.write(json.dumps(_lowercase))
def snake_case__ ( self , **_lowerCamelCase):
kwargs.update(self.special_tokens_map)
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_lowercase)
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
UpperCAmelCase__ : Any = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.get_input_output_texts(_lowercase)
UpperCAmelCase__ : int = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase__ : Optional[int] = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase)
return text, ids
def snake_case__ ( self):
pass # TODO add if relevant
def snake_case__ ( self):
pass # TODO add if relevant
def snake_case__ ( self):
pass # TODO add if relevant
def snake_case__ ( self):
UpperCAmelCase__ : Dict = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ : Optional[Any] = """こんにちは、世界。 こんばんは、㔺界。"""
UpperCAmelCase__ : Optional[Any] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
UpperCAmelCase__ : str = tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
# Testing conversion to ids without special tokens
UpperCAmelCase__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase__ : List[Any] = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
# Testing conversion to ids with special tokens
UpperCAmelCase__ : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase__ : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase__ : Tuple = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ : str = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
UpperCAmelCase__ : List[str] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
UpperCAmelCase__ : str = tokenizer.encode(_lowercase)
UpperCAmelCase__ : int = tokenizer.decode(_lowercase)
self.assertEqual(_lowercase , _lowercase)
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""")
# Testing tokenization
UpperCAmelCase__ : List[Any] = """こんにちは、世界。"""
UpperCAmelCase__ : Optional[int] = """こんばんは、㔺界。😀"""
UpperCAmelCase__ : Dict = """こんにちは、世界。こんばんは、世界。😀"""
UpperCAmelCase__ : Tuple = tokenizer.encode(prefix_text + input_text)
UpperCAmelCase__ : int = tokenizer.encode("""""" , prefix_text=prefix_text + input_text)
UpperCAmelCase__ : Union[str, Any] = tokenizer.encode(_lowercase , prefix_text=_lowercase)
UpperCAmelCase__ : Dict = tokenizer.decode(_lowercase)
UpperCAmelCase__ : List[str] = tokenizer.decode(_lowercase)
UpperCAmelCase__ : Tuple = tokenizer.decode(_lowercase)
self.assertEqual(_lowercase , _lowercase)
self.assertEqual(_lowercase , _lowercase)
self.assertEqual(_lowercase , _lowercase)
@slow
def snake_case__ ( self):
UpperCAmelCase__ : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""")
# Testing tokenization
UpperCAmelCase__ : Union[str, Any] = """こんにちは、世界。"""
UpperCAmelCase__ : Dict = """こんばんは、㔺界。😀"""
UpperCAmelCase__ : Union[str, Any] = len(tokenizer.encode(_lowercase)) - 2
UpperCAmelCase__ : List[str] = len(tokenizer.encode(_lowercase)) - 2
UpperCAmelCase__ : int = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase__ : Any = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase__ : Optional[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase__ : List[str] = tokenizer(prefix_text + input_text).token_type_ids
UpperCAmelCase__ : Union[str, Any] = tokenizer("""""" , prefix_text=prefix_text + input_text).token_type_ids
UpperCAmelCase__ : List[str] = tokenizer(_lowercase , prefix_text=_lowercase).token_type_ids
self.assertListEqual(_lowercase , _lowercase)
self.assertListEqual(_lowercase , _lowercase)
self.assertListEqual(_lowercase , _lowercase)
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""")
UpperCAmelCase__ : int = tokenizer.encode("""あンいワ""")
UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""")
UpperCAmelCase__ : Union[str, Any] = tokenizer.encode("""いワ""" , prefix_text="""あン""")
self.assertEqual(tokenizer.decode(_lowercase) , tokenizer.decode(_lowercase))
self.assertEqual(tokenizer.decode(_lowercase) , tokenizer.decode(_lowercase))
self.assertNotEqual(_lowercase , _lowercase)
self.assertNotEqual(_lowercase , _lowercase)
self.assertEqual(x_token_a[1] , x_token_a[-1]) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3]) # SEG token
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""")
UpperCAmelCase__ : Optional[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
UpperCAmelCase__ : Optional[int] = tokenizer(_lowercase , padding=_lowercase)
UpperCAmelCase__ : Optional[Any] = tokenizer.batch_encode_plus(_lowercase , padding=_lowercase)
# fmt: off
UpperCAmelCase__ : Optional[int] = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
UpperCAmelCase__ : List[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase__ : Dict = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _lowercase)
self.assertListEqual(x_token.token_type_ids , _lowercase)
self.assertListEqual(x_token.attention_mask , _lowercase)
self.assertListEqual(x_token_a.input_ids , _lowercase)
self.assertListEqual(x_token_a.token_type_ids , _lowercase)
self.assertListEqual(x_token_a.attention_mask , _lowercase)
def snake_case__ ( self):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def snake_case__ ( self):
# tokenizer has no padding token
pass
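# The prefix-text API exercised above, in one place (downloads the tokenizer, so
# left as comments):
#   tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#   ids = tokenizer("いワ", prefix_text="あン").input_ids
#   # prefix and body are separated by a SEG token, as the test above checks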
| 366
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ):
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Tuple = use_attention_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Tuple = hidden_act
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = max_position_embeddings
UpperCAmelCase__ : Optional[int] = type_vocab_size
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : str = num_choices
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase__ : Optional[Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = config_and_inputs
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :Optional[int] = True
lowerCAmelCase :Any = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = FlaxRoFormerModelTester(self)
@slow
def snake_case__ ( self):
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowerCamelCase)
UpperCAmelCase__ : Dict = model(np.ones((1, 1)))
self.assertIsNotNone(_lowerCamelCase)
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Any = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""")
UpperCAmelCase__ : int = jnp.array([[0, 1, 2, 3, 4, 5]])
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)[0]
UpperCAmelCase__ : Union[str, Any] = 5_0000
UpperCAmelCase__ : Any = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4))
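# The integration check above, as a standalone sketch (downloads the checkpoint,
# so left as comments):
#   model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#   logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 50_000)
#   print(logits[:, :3, :3])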
| 283
| 0
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCamelCase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __UpperCAmelCase (datasets.BuilderConfig ):
__snake_case : Tuple = None
def _generate_iterable_examples( df ,partition_order ,) -> Optional[Any]:
    """simple docstring"""
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("""*""" ,pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""" ).where(F'part_id = {partition_id}' ).drop("""part_id""" )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield F'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn
class __UpperCAmelCase (_BaseExamplesIterable ):
def __init__( self: Optional[int] , UpperCAmelCase_: "pyspark.sql.DataFrame" , UpperCAmelCase_: Tuple=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = df
_SCREAMING_SNAKE_CASE = partition_order or range(self.df.rdd.getNumPartitions() )
_SCREAMING_SNAKE_CASE = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self: Optional[int] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def UpperCamelCase ( self: Any , UpperCAmelCase_: np.random.Generator ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df , partition_order=UpperCAmelCase_ )
def UpperCamelCase ( self: int , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.split_shard_indices_by_worker(UpperCAmelCase_ , UpperCAmelCase_ )
return SparkExamplesIterable(self.df , partition_order=UpperCAmelCase_ )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return len(self.partition_order )
class __UpperCAmelCase (datasets.DatasetBuilder ):
__snake_case : Optional[int] = SparkConfig
def __init__( self: List[Any] , UpperCAmelCase_: "pyspark.sql.DataFrame" , UpperCAmelCase_: str = None , UpperCAmelCase_: str = None , **UpperCAmelCase_: Optional[Any] , ):
'''simple docstring'''
import pyspark
_SCREAMING_SNAKE_CASE = pyspark.sql.SparkSession.builder.getOrCreate()
_SCREAMING_SNAKE_CASE = df
_SCREAMING_SNAKE_CASE = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ , config_name=str(self.df.semanticHash() ) , **UpperCAmelCase_ , )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
def create_cache_and_write_probe(UpperCAmelCase_: str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCAmelCase_ )
            _SCREAMING_SNAKE_CASE = os.path.join(self._cache_dir , """fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_SCREAMING_SNAKE_CASE = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(UpperCAmelCase_: Dict ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_SCREAMING_SNAKE_CASE = self.df.count()
_SCREAMING_SNAKE_CASE = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_SCREAMING_SNAKE_CASE = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_SCREAMING_SNAKE_CASE = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_SCREAMING_SNAKE_CASE = min(UpperCAmelCase_ , int(approx_total_size / max_shard_size ) )
_SCREAMING_SNAKE_CASE = self.df.repartition(UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: int , ):
'''simple docstring'''
import pyspark
_SCREAMING_SNAKE_CASE = ParquetWriter if file_format == 'parquet' else ArrowWriter
_SCREAMING_SNAKE_CASE = os.path.join(self._working_dir , os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
_SCREAMING_SNAKE_CASE = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_SCREAMING_SNAKE_CASE = self.config.features
_SCREAMING_SNAKE_CASE = self._writer_batch_size
_SCREAMING_SNAKE_CASE = self._fs.storage_options
def write_arrow(UpperCAmelCase_: List[str] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_SCREAMING_SNAKE_CASE = pyspark.TaskContext().taskAttemptId()
_SCREAMING_SNAKE_CASE = next(UpperCAmelCase_ , UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = writer_class(
features=UpperCAmelCase_ , path=working_fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , writer_batch_size=UpperCAmelCase_ , storage_options=UpperCAmelCase_ , embed_local_files=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_SCREAMING_SNAKE_CASE = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
_SCREAMING_SNAKE_CASE = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , writer_batch_size=UpperCAmelCase_ , storage_options=UpperCAmelCase_ , embed_local_files=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
_SCREAMING_SNAKE_CASE = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
_SCREAMING_SNAKE_CASE = os.path.join(os.path.dirname(UpperCAmelCase_ ) , os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = (
self.df.mapInArrow(UpperCAmelCase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCamelCase ( self: int , UpperCAmelCase_: "datasets.SplitGenerator" , UpperCAmelCase_: str = "arrow" , UpperCAmelCase_: Optional[Union[str, int]] = None , UpperCAmelCase_: Optional[int] = None , **UpperCAmelCase_: Any , ):
'''simple docstring'''
self._validate_cache_dir()
_SCREAMING_SNAKE_CASE = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = not is_remote_filesystem(self._fs )
_SCREAMING_SNAKE_CASE = os.path.join if is_local else posixpath.join
_SCREAMING_SNAKE_CASE = '-TTTTT-SSSSS-of-NNNNN'
_SCREAMING_SNAKE_CASE = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
_SCREAMING_SNAKE_CASE = path_join(self._output_dir , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = total_num_examples
_SCREAMING_SNAKE_CASE = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
_SCREAMING_SNAKE_CASE = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_SCREAMING_SNAKE_CASE = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , ):
rename(
UpperCAmelCase_ , fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , fpath.replace("""TTTTT-SSSSS""" , F'{global_shard_id:05d}' ).replace("""NNNNN""" , F'{total_shards:05d}' ) , )
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
for i in range(len(UpperCAmelCase_ ) ):
_SCREAMING_SNAKE_CASE = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ , len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , fpath.replace(UpperCAmelCase_ , """""" ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
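# Typical entry point that drives this builder (a sketch; it assumes a live
# SparkSession bound to `spark`):
#   import datasets
#   df = spark.range(100).withColumnRenamed("id", "value")
#   ds = datasets.Dataset.from_spark(df)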
| 306
|
def jaro_winkler(stra: str, strb: str) -> float:
    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = F"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
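    # Worked example (classic reference pair; the value is the textbook result):
    #   jaro_winkler("martha", "marhta") ~= 0.9611
    #   # 6 matches and 1 transposition give jaro = 17/18; the common prefix
    #   # "mar" adds 0.1 * 3 * (1 - 17/18)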
| 175
| 0
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class A__ :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=1_3 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=9_9 , __snake_case=3_2 , __snake_case=5 , __snake_case=4 , __snake_case=4 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.1 , __snake_case=True , __snake_case=5_1_2 , __snake_case=1_6 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_multiple_size
snake_case = hidden_act
snake_case = hidden_dropout
snake_case = attention_dropout
snake_case = weight_tying
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def a_ ( self ):
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = self.get_config()
return config, input_ids, input_mask, token_labels
def a_ ( self ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.prepare_config_and_inputs()
snake_case = True
return config, input_ids, input_mask, token_labels
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = GPTNeoXJapaneseModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
snake_case = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = True
snake_case = GPTNeoXJapaneseModel(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = GPTNeoXJapaneseForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = True
snake_case = GPTNeoXJapaneseForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
# first forward pass
snake_case = model(__snake_case , attention_mask=__snake_case , use_cache=__snake_case )
snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case = model(__snake_case , attention_mask=__snake_case , output_hidden_states=__snake_case )
snake_case = output_from_no_past['''hidden_states'''][0]
snake_case = model(
__snake_case , attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) )
def a_ ( self ):
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A__ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
__magic_name__ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
__magic_name__ = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a_ ( self ):
snake_case = GPTNeoXJapaneseModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , hidden_size=3_7 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
# This regression test was failing with PyTorch < 1.3
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case = None
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__snake_case )
@slow
def a_ ( self ):
snake_case = '''abeja/gpt-neox-japanese-2.7b'''
snake_case = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
snake_case = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
snake_case = GPTNeoXJapaneseTokenizer.from_pretrained(__snake_case )
snake_case = GPTNeoXJapaneseForCausalLM.from_pretrained(__snake_case )
snake_case = []
for prompt in prompts:
snake_case = tokenizer(__snake_case , return_tensors='''pt''' ).input_ids
snake_case = model.generate(__snake_case , max_length=5_0 )
snake_case = tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
predicted_outputs += generated_string
self.assertListEqual(__snake_case , __snake_case )
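# Generation pattern used by the slow test above (downloads the 2.7B checkpoint,
# so left as comments):
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
#   texts = tokenizer.batch_decode(model.generate(ids, max_length=50), skip_special_tokens=True)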
| 359
|
import math


class SelfOrganizingMap:
    """simple docstring"""

    def get_winner(self, weights, sample):
        d_a = 0.0
        d_b = 0.0
        for i in range(len(sample)):
            d_a += math.pow((sample[i] - weights[0][i]), 2)
            d_b += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d_a > d_b else 1

    def update(self, weights, sample, j, alpha):
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main():
    """simple docstring"""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(F'''Clusters that the test sample belongs to : {winner}''')
    print(F'''Weights that have been trained : {weights}''')


# running the main() function
if __name__ == "__main__":
    main()
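# The update method implements the competitive-learning rule
# w_winner <- w_winner + alpha * (x - w_winner). One step by hand with the values
# from main() above:
#   w = [0.2, 0.6, 0.5, 0.9], x = [1, 1, 0, 0], alpha = 0.5
#   w' = [0.6, 0.8, 0.25, 0.45]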
| 213
| 0
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : Union[str, Any] = 42
UpperCAmelCase : List[Any] = jnp.floataa
def _lowercase (self : Any) -> List[str]:
__snake_case : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : int , _A : Tuple) -> List[Any]:
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = hidden_states.shape
__snake_case : Union[str, Any] = jax.image.resize(
SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
__snake_case : Any = self.conv(SCREAMING_SNAKE_CASE_)
return hidden_states
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int = 42
UpperCAmelCase : Union[str, Any] = jnp.floataa
def _lowercase (self : Optional[Any]) -> int:
__snake_case : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : Union[str, Any] , _A : int) -> Dict:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__snake_case : Dict = self.conv(SCREAMING_SNAKE_CASE_)
return hidden_states
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int = 42
UpperCAmelCase : Any = None
UpperCAmelCase : Dict = 0.0
UpperCAmelCase : Dict = None
UpperCAmelCase : List[Any] = jnp.floataa
def _lowercase (self : Optional[Any]) -> Tuple:
__snake_case : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
__snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__snake_case : str = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case : Any = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype)
__snake_case : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__snake_case : Dict = nn.Dropout(self.dropout_prob)
__snake_case : Union[str, Any] = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case : Tuple = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__snake_case : Dict = None
if use_nin_shortcut:
__snake_case : Any = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__(self : int , _A : List[str] , _A : str , _A : Optional[int]=True) -> Tuple:
__snake_case : List[str] = hidden_states
__snake_case : List[str] = self.norma(SCREAMING_SNAKE_CASE_)
__snake_case : Any = nn.swish(SCREAMING_SNAKE_CASE_)
__snake_case : int = self.conva(SCREAMING_SNAKE_CASE_)
__snake_case : List[Any] = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_))
__snake_case : Any = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1) , 1)
__snake_case : Optional[int] = hidden_states + temb
__snake_case : Optional[int] = self.norma(SCREAMING_SNAKE_CASE_)
__snake_case : int = nn.swish(SCREAMING_SNAKE_CASE_)
__snake_case : Tuple = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
__snake_case : List[str] = self.conva(SCREAMING_SNAKE_CASE_)
if self.conv_shortcut is not None:
__snake_case : Dict = self.conv_shortcut(SCREAMING_SNAKE_CASE_)
return hidden_states + residual
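# Minimal init/apply sketch (a sketch only: it assumes the block above mirrors
# diffusers' FlaxResnetBlock2D and that the import path below exists in your
# diffusers version; shapes are illustrative, NHWC as in the code above):
#   import jax
#   import jax.numpy as jnp
#   from diffusers.models.resnet_flax import FlaxResnetBlock2D
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
#   x = jnp.ones((1, 8, 8, 32))
#   temb = jnp.ones((1, 128))
#   params = block.init(jax.random.PRNGKey(0), x, temb)
#   y = block.apply(params, x, temb)  # same shape as x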
| 172
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase_ = '''CompVis/stable-diffusion-v1-1'''
lowerCamelCase_ = '''CompVis/stable-diffusion-v1-2'''
lowerCamelCase_ = '''CompVis/stable-diffusion-v1-3'''
lowerCamelCase_ = '''CompVis/stable-diffusion-v1-4'''
class __A( __lowerCamelCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
        super().__init__()
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , requires_safety_checker=SCREAMING_SNAKE_CASE_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
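
# A hedged usage sketch (loading mechanism assumed, not verified here): this is
# how diffusers community pipelines are typically instantiated.
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe(prompt="an astronaut riding a horse")
#     images = output.images  # one image per v1.x checkpoint, in checkpoint order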
| 244
| 0
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    '''simple docstring'''
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    '''simple docstring'''
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    '''simple docstring'''
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    '''simple docstring'''
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        '''simple docstring'''
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
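
    # Follow-up check (added for illustration): threshold the predicted
    # probabilities at 0.5 and report training accuracy on the same data.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print("training accuracy:", (predictions == y).mean())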
| 235
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """simple docstring"""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """simple docstring"""

    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn)

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
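
    # Note on blend_v/blend_h above: overlapping tile edges are combined with a
    # linear crossfade. With blend_extent = 4, for example, rows of `b` get
    # weights 0/4, 1/4, 2/4, 3/4 while the matching rows of `a` get the
    # complement, which hides visible seams between adjacent tiles.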
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
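
# A hedged usage sketch (checkpoint name assumed for illustration):
#     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#     vae.enable_tiling()  # large inputs now route through tiled_encode/tiled_decode
#     posterior = vae.encode(torch.randn(1, 3, 1024, 1024)).latent_dist
#     reconstruction = vae.decode(posterior.sample()).sample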
| 235
| 1
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """simple docstring"""
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 0.028  # kg/mol for N2; the formula expects SI units, not g/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
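
    # Worked check: v_rms = sqrt(3 * R * T / M)
    #             = sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s,
    # the textbook RMS speed of nitrogen near room temperature.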
| 271
|
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """simple docstring"""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
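
    # Small demo (inputs chosen for illustration): the interpolating polynomial
    # through samples of f(x) = x**2 is x**2 itself, so the scheme returns 25.0.
    value, _table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)
    print(f"interpolated value at x = 5: {value}")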
| 271
| 1
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    '''simple docstring'''
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'Invalid type requested: {input_type}')
    return inputs


def output_types(outputs: List):
    '''simple docstring'''
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f'Invalid output: {output}')
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 367
|
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    '''simple docstring'''
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
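
    # Quick demo (values follow from the definition of the Möbius function):
    # mobius(4) == 0 since 4 is divisible by a square; mobius(6) == 1 since
    # 6 = 2 * 3 has an even number of distinct prime factors.
    print(mobius(4), mobius(6))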
| 135
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Dict , __A : List[Any] , __A : Dict="<s>" , __A : Any="</s>" , __A : Union[str, Any]="</s>" , __A : Optional[int]="<s>" , __A : Dict="<unk>" , __A : Optional[Any]="<pad>" , __A : Optional[Dict[str, Any]] = None , **__A : Union[str, Any] , ):
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__UpperCamelCase = 7
__UpperCamelCase = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
__UpperCamelCase = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
__UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCamelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__UpperCamelCase = len(self.sp_model )
__UpperCamelCase = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__A )
__UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple ):
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
__UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Any , __A : Optional[int] ):
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCamelCase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__UpperCamelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _lowerCamelCase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A ))
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A ))
def _lowerCamelCase ( self : Dict , __A : List[int] , __A : Optional[List[int]] = None ):
__UpperCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _lowerCamelCase ( self : Optional[int] ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self : List[Any] , __A : str ):
return self.sp_model.encode(__A , out_type=__A )
def _lowerCamelCase ( self : int , __A : Any ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCamelCase = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCamelCase ( self : List[str] , __A : List[str] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowerCamelCase ( self : Dict , __A : int ):
__UpperCamelCase = ''.join(__A ).replace(__A , ' ' ).strip()
return out_string
def _lowerCamelCase ( self : int , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCamelCase = os.path.join(
__A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , 'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
| 53
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 283
| 0
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    '''simple docstring'''
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
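
# Behavior notes (worked examples, inputs chosen for illustration):
#   next_prime(10)    -> 11  (scans upward from 10 until a prime is hit)
#   next_prime(11)    -> 13  (11 is already prime, so the search restarts at 12)
#   next_prime(10, 2) -> 23  (the scan starts at factor * value = 20)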
| 205
|
"""simple docstring"""
def is_palindrome(n: int) -> bool:
    '''simple docstring'''
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    '''simple docstring'''
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    '''simple docstring'''
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F"""{solution() = }""")
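
    # Reference point: 196 is the classic Lychrel candidate, and solution()
    # counts 249 numbers below ten thousand that never reach a palindrome
    # within fifty iterations (the known Project Euler 55 answer).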
| 205
| 1
|
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    '''simple docstring'''
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
| 22
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
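
# Usage note (illustrative): callers pass a key from the deps table, e.g.
#     dep_version_check("tokenizers")
# which re-checks that the installed tokenizers version satisfies the pin.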
| 213
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    '''simple docstring'''

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self) -> "pa.StructType":
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    '''simple docstring'''

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self) -> "pa.StructType":
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
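
# Worked example of encode_example above (inputs chosen for illustration):
#     TranslationVariableLanguages(languages=["de", "en"]).encode_example(
#         {"en": "the cat", "de": "die Katze"}
#     )
# returns {"language": ("de", "en"), "translation": ("die Katze", "the cat")},
# i.e. pairs sorted in ascending order by language code.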
| 68
| 0
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a__ = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int) -> Tuple[int, int]:
    """simple docstring"""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
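
# Worked example (numbers chosen for illustration): a 480x640 image resized to
# {"height": 384, "width": 384} with keep_aspect_ratio=True and multiple=32
# picks the scale closer to 1 (384/480 = 0.8, the "fit height" branch), giving
# (384, 512) -- both already multiples of 32.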
class DPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 235
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roformer"

    def __init__(self, vocab_size=50_000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 235
| 1
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    '''simple docstring'''

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """simple docstring"""
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self , files ) -> Dict:
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
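# A hedged end-to-end sketch for the builder above, assuming the packaged
# "csv" loader is registered in `datasets`; the file name is hypothetical.
if __name__ == "__main__":
    pd.DataFrame({"text": ["hello world"], "label": [0]}).to_csv("demo_train.csv", index=False)
    # CsvConfig kwargs (e.g. `sep`) are forwarded to pandas.read_csv under the hood.
    dset = datasets.load_dataset("csv", data_files={"train": "demo_train.csv"}, sep=",")
    print(dset["train"][0])  # {'text': 'hello world', 'label': 0}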
| 146
|
import pytest
A : Optional[Any] = '__dummy_dataset1__'
A : Tuple = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def UpperCamelCase ( ) -> Any:
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : Dict , __magic_name__ : Any ) -> str:
"""simple docstring"""
lowercase__ = dataset_loading_script_name
lowercase__ = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=__magic_name__ )
lowercase__ = script_dir / f'''{script_name}.py'''
with open(__magic_name__ , """w""" ) as f:
f.write(__magic_name__ )
return str(__magic_name__ )
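# A hedged sketch of how the directory fixture above is typically consumed;
# the fixture name `dataset_loading_script_dir` is an assumption (the
# obfuscated definitions hide the real names), as is the loader resolving
# `<dir>/<dirname>.py` from the returned path.
def test_dummy_dataset_script_resolves(dataset_loading_script_dir):
    import datasets

    builder = datasets.load_dataset_builder(dataset_loading_script_dir)
    assert builder.name == "__dummy_dataset1__"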
| 146
| 1
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ):
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
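# A small worked check of the helper above (numbers verified by hand): for a
# channels-first 480x640 image resized toward 384x384 with
# keep_aspect_ratio=True and multiple=32, the scale closest to 1 is
# 384/480 = 0.8 (fit height), giving round(384/32)*32 = 384 and
# round(512/32)*32 = 512.
if __name__ == "__main__":
    demo_image = np.zeros((3, 480, 640))
    print(get_resize_output_image_size(demo_image, (384, 384), True, 32))  # (384, 512)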
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['pixel_values']
def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = False, __a = 1, __a = True, __a = 1 / 255, __a = True, __a = None, __a = None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Any = size if size is not None else {"height": 384, "width": 384}
_lowerCAmelCase : Optional[int] = get_size_dict(__a)
_lowerCAmelCase : Optional[Any] = do_resize
_lowerCAmelCase : Dict = size
_lowerCAmelCase : Any = keep_aspect_ratio
_lowerCAmelCase : str = ensure_multiple_of
_lowerCAmelCase : str = resample
_lowerCAmelCase : Dict = do_rescale
_lowerCAmelCase : Optional[int] = rescale_factor
_lowerCAmelCase : Dict = do_normalize
_lowerCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self, __a, __a, __a = False, __a = 1, __a = PILImageResampling.BICUBIC, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
_lowerCAmelCase : List[Any] = get_resize_output_image_size(
__a, output_size=(size["height"], size["width"]), keep_aspect_ratio=__a, multiple=__a, )
return resize(__a, size=__a, resample=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a = None, **__a, ):
'''simple docstring'''
return rescale(__a, scale=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ):
'''simple docstring'''
return normalize(__a, mean=__a, std=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : List[Any] = size if size is not None else self.size
_lowerCAmelCase : str = get_size_dict(__a)
_lowerCAmelCase : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowerCAmelCase : Any = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowerCAmelCase : int = resample if resample is not None else self.resample
_lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Optional[Any] = make_list_of_images(__a)
if not valid_images(__a):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
_lowerCAmelCase : List[Any] = [to_numpy_array(__a) for image in images]
if do_resize:
_lowerCAmelCase : Any = [self.resize(image=__a, size=__a, resample=__a) for image in images]
if do_rescale:
_lowerCAmelCase : List[str] = [self.rescale(image=__a, scale=__a) for image in images]
if do_normalize:
_lowerCAmelCase : Dict = [self.normalize(image=__a, mean=__a, std=__a) for image in images]
_lowerCAmelCase : List[str] = [to_channel_dimension_format(__a, __a) for image in images]
_lowerCAmelCase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__a, tensor_type=__a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__a) != len(__a):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(__a):
_lowerCAmelCase : List[Any] = target_sizes.numpy()
_lowerCAmelCase : Dict = []
for idx in range(len(__a)):
_lowerCAmelCase : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=__a)
_lowerCAmelCase : int = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(__a)
else:
_lowerCAmelCase : Dict = logits.argmax(dim=1)
_lowerCAmelCase : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
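# A hedged usage sketch for an image processor with the interface above; the
# DPT-style configuration is an assumption, and `transformers` plus `torch`
# must be installed for the tensor conversion.
if __name__ == "__main__":
    import numpy as np
    from transformers import DPTImageProcessor

    processor = DPTImageProcessor(
        size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32
    )
    dummy = np.zeros((480, 640, 3), dtype=np.uint8)
    batch = processor(images=dummy, return_tensors="pt")
    print(batch["pixel_values"].shape)  # e.g. torch.Size([1, 3, 384, 512])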
| 36
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling( arr: np.ndarray , size: int , stride: int ) -> np.ndarray:
    '''Max-pool a square matrix with the given window size and stride.'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling( arr: np.ndarray , size: int , stride: int ) -> np.ndarray:
    '''Average-pool a square matrix with the given window size and stride.'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
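# A quick numeric sanity check for the two pooling functions above (a sketch;
# expected outputs computed by hand for a 4x4 ramp): max pooling keeps the
# largest entry of each 2x2 block, average pooling keeps the truncated mean.
if __name__ == "__main__":
    demo = np.arange(16).reshape(4, 4)
    print(maxpooling(demo, size=2, stride=2))  # [[ 5.  7.] [13. 15.]]
    print(avgpooling(demo, size=2, stride=2))  # [[ 2.  4.] [10. 12.]]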
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 135
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
UpperCamelCase__ =get_logger(__name__)
class VerificationMode(enum.Enum ):
    '''simple docstring'''
    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'
class ChecksumVerificationException(Exception ):
    '''simple docstring'''
class UnexpectedDownloadedFile(ChecksumVerificationException ):
    '''simple docstring'''
class ExpectedMoreDownloadedFiles(ChecksumVerificationException ):
    '''simple docstring'''
class NonMatchingChecksumError(ChecksumVerificationException ):
    '''simple docstring'''
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None ):
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
class SplitsVerificationException(Exception ):
    '''simple docstring'''
class UnexpectedSplits(SplitsVerificationException ):
    '''simple docstring'''
class ExpectedMoreSplits(SplitsVerificationException ):
    '''simple docstring'''
class NonMatchingSplitsSizesError(SplitsVerificationException ):
    '''simple docstring'''
def verify_splits(expected_splits, recorded_splits ):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info("All the splits matched successfully." )
def get_size_checksum_dict(path, record_checksum = True ):
    if record_checksum:
        m = sha256()
        with open(path, "rb" ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ), b"" ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset(dataset_size ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
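# A quick sketch exercising the repaired helpers above; the file name is
# hypothetical and the expectation is copied from the recorded metadata, so
# verification passes without raising.
if __name__ == "__main__":
    path = "example.bin"
    with open(path, "wb") as f:
        f.write(b"hello world")
    recorded = {path: get_size_checksum_dict(path)}
    verify_checksums(dict(recorded), recorded)  # no exception raised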
| 325
|
import numpy as np
import datasets
UpperCamelCase__ ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
UpperCamelCase__ ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
UpperCamelCase__ ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute( self , X , reference_distribution ) -> int:
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
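# A plain-numpy sketch mirroring the computation above, using the tiny input
# from the module docstring (the covariance here is singular, so the
# pseudo-inverse branch is the one exercised; the 0.5 was checked by hand).
if __name__ == "__main__":
    X = np.array([[0, 1]])
    ref = np.array([[0, 1], [1, 0]])
    X_minus_mu = X - np.mean(ref)
    inv_cov = np.linalg.pinv(np.cov(ref.T))
    print(np.dot(np.dot(X_minus_mu, inv_cov), X_minus_mu.T).diagonal())  # [0.5]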
| 325
| 1
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __lowerCAmelCase :
_a = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
_a = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
_a = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
_a = field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
_a = field(
default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def a ( ) -> List[Any]:
"""simple docstring"""
_lowercase =HfArgumentParser((ModelArguments,) )
((_lowercase) , ) =parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_lowercase =AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
_lowercase =AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
_lowercase =AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
_lowercase =AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_lowercase =True
_lowercase =True
_lowercase =FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=A__ , decoder_config=A__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_lowercase =decoder_config.decoder_start_token_id
_lowercase =decoder_config.pad_token_id
if decoder_start_token_id is None:
_lowercase =decoder_config.bos_token_id
if pad_token_id is None:
_lowercase =decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_lowercase =decoder_config.eos_token_id
_lowercase =decoder_start_token_id
_lowercase =pad_token_id
_lowercase =AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
_lowercase =AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
_lowercase =tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
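# A hedged sketch of invoking this script from the command line; the script
# file name and the checkpoint names are plausible examples, not values taken
# from the source.
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2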
| 205
|
from __future__ import annotations
def a ( nums: list[int] ) -> int:
    '''Return the maximum sum over subsets of non-adjacent elements of `nums`.'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
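# A quick check of the dynamic-programming helper above (computed by hand):
# for [1, 2, 4, 5, 9, 10] the best non-adjacent pick is 2 + 5 + 10 = 17.
if __name__ == "__main__":
    print(a([1, 2, 4, 5, 9, 10]))  # 17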
| 205
| 1
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def lowerCAmelCase_ ( __a ) -> Dict: # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def lowerCAmelCase_ ( __a ) -> Optional[Any]: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] ={}
lowerCamelCase__: Dict =[]
lowerCamelCase__: Any =1
lowerCamelCase__: List[str] =[1, 2]
lowerCamelCase__: List[str] ={"a": 1, "b": 2}
lowerCamelCase__: Optional[Any] ={"a": [1, 2], "b": [3, 4]}
lowerCamelCase__: Union[str, Any] ={"a": {"1": 1}, "b": 2}
lowerCamelCase__: Tuple ={"a": 1, "b": 2, "c": 3, "d": 4}
lowerCamelCase__: Any ={}
lowerCamelCase__: Union[str, Any] =[]
lowerCamelCase__: int =2
lowerCamelCase__: Dict =[2, 3]
lowerCamelCase__: List[str] ={"a": 2, "b": 3}
lowerCamelCase__: str ={"a": [2, 3], "b": [4, 5]}
lowerCamelCase__: List[str] ={"a": {"1": 2}, "b": 3}
lowerCamelCase__: Union[str, Any] ={"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_)
lowerCamelCase__: List[str] =2
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
lowerCamelCase__: int ={"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
lowerCamelCase__: Dict ={"a": 2, "b": 0, "c": 2}
lowerCamelCase__: Dict ={
"a": np.eye(2).astype(UpperCAmelCase_),
"b": np.zeros(3).astype(UpperCAmelCase_),
"c": np.ones(2).astype(UpperCAmelCase_),
}
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , map_numpy=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase_ , UpperCAmelCase_ , map_numpy=UpperCAmelCase_).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase_ , UpperCAmelCase_ , map_numpy=UpperCAmelCase_ , num_proc=UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase_ , UpperCAmelCase_ , map_numpy=UpperCAmelCase_ , num_proc=UpperCAmelCase_).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase_): # can't pickle a local lambda
            map_nested(lambda x: x + 1 , UpperCAmelCase_ , num_proc=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] ={"a": 1, "b": 2}
lowerCamelCase__: int ={"a": 3, "b": 4}
lowerCamelCase__: Union[str, Any] ={"a": 5, "b": 6}
lowerCamelCase__: List[Any] =sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
self.assertEqual(sorted(zip_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str:
'''simple docstring'''
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowercase_ = "bar"
lowerCamelCase__: Dict =Foo()
self.assertEqual(foo.my_attr , "bar")
with temporary_assignment(UpperCAmelCase_ , "my_attr" , "BAR"):
self.assertEqual(foo.my_attr , "BAR")
self.assertEqual(foo.my_attr , "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
lowerCamelCase__: List[str] ={F"""{i}""": i for i in range(__a )}
        lowerCamelCase__: str =map_nested(lambda x : x + 10 , __a , num_proc=__a , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_tf
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
lowerCamelCase__: Optional[Any] =layers.Dense(2)
def gen_random_output():
lowerCamelCase__: Optional[int] =tf.random.uniform((1, 3))
return model(UpperCAmelCase_).numpy()
        with temp_seed(42 , set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2)
        self.assertGreater(np.abs(out1 - out3).sum() , 0)
@require_torch
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
import torch
def gen_random_output():
lowerCamelCase__: Union[str, Any] =torch.nn.Linear(3 , 2)
lowerCamelCase__: List[Any] =torch.rand(1 , 3)
return model(UpperCAmelCase_).detach().numpy()
        with temp_seed(42 , set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2)
        self.assertGreater(np.abs(out1 - out3).sum() , 0)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3)
        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2)
        self.assertGreater(np.abs(out1 - out3).sum() , 0)
@pytest.mark.parametrize("input_data" , [{}] )
def lowerCAmelCase_ ( __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =NestedDataStructure(__a ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: List[str] =NestedDataStructure(__a ).flatten()
assert output == expected_output
def lowerCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =A(x=1 , y="foobar" )
lowerCamelCase__: Dict ={"x": 1, "y": "foobar"}
assert asdict(__a ) == expected_output
lowerCamelCase__: str ={"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
lowerCamelCase__: Optional[Any] ={"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__a ) == expected_output
with pytest.raises(__a ):
asdict([1, A(x=10 , y="foo" )] )
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
return text.split()
def lowerCAmelCase_ ( __a ) -> Optional[Any]:
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
with Pool(2 ) as pool:
lowerCamelCase__: Any =list(iflatmap_unordered(__a , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__a ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
lowerCamelCase__: Optional[int] =list(iflatmap_unordered(__a , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__a ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
lowerCamelCase__: str =[]
for yield_time, content in iflatmap_unordered(
__a , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(__a )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__a ) == 4
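# A hedged sketch of `map_nested`, the helper exercised throughout this file
# (relying on the import at the top of the file): it applies a function to
# every leaf of a nested structure, optionally in parallel.
if __name__ == "__main__":
    print(map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}))
    # -> {'a': [2, 3], 'b': {'c': 4}}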
| 368
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Dict=30 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : List[str]=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str=10 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Dict=None , ) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: int =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: List[str] =image_size
lowerCamelCase__: Tuple =patch_size
lowerCamelCase__: Tuple =num_channels
lowerCamelCase__: Tuple =is_training
lowerCamelCase__: Optional[int] =use_labels
lowerCamelCase__: Dict =hidden_size
lowerCamelCase__: Dict =num_hidden_layers
lowerCamelCase__: Optional[Any] =num_attention_heads
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: List[str] =hidden_act
lowerCamelCase__: Optional[Any] =hidden_dropout_prob
lowerCamelCase__: str =attention_probs_dropout_prob
lowerCamelCase__: str =type_sequence_label_size
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: Optional[int] =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__: List[str] =(image_size // patch_size) ** 2
lowerCamelCase__: Optional[Any] =num_patches + 1
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: List[str] =None
if self.use_labels:
lowerCamelCase__: Any =ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCamelCase__: List[str] =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[str]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[str] =TFViTModel(config=UpperCAmelCase_)
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , training=UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# Test with an image with different size than the one specified in config.
lowerCamelCase__: Dict =self.image_size // 2
lowerCamelCase__: List[str] =pixel_values[:, :, :image_size, :image_size]
lowerCamelCase__: List[str] =model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_ , training=UpperCAmelCase_)
lowerCamelCase__: List[str] =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Any) ->str:
'''simple docstring'''
lowerCamelCase__: Any =self.type_sequence_label_size
lowerCamelCase__: str =TFViTForImageClassification(UpperCAmelCase_)
lowerCamelCase__: List[str] =model(UpperCAmelCase_ , labels=UpperCAmelCase_ , training=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# Test with an image with different size than the one specified in config.
lowerCamelCase__: Union[str, Any] =self.image_size // 2
lowerCamelCase__: Dict =pixel_values[:, :, :image_size, :image_size]
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_ , training=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
lowerCamelCase__: int =1
lowerCamelCase__: List[str] =TFViTForImageClassification(UpperCAmelCase_)
lowerCamelCase__: List[str] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowerCamelCase__: Any =model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =config_and_inputs
lowerCamelCase__: Optional[Any] ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowercase_ = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: int =TFViTModelTester(self)
lowerCamelCase__: List[str] =ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def SCREAMING_SNAKE_CASE_ (self : str) ->Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: int =model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
lowerCamelCase__: Tuple =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , tf.keras.layers.Layer))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: List[Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: Tuple =inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: str =[*signature.parameters.keys()]
lowerCamelCase__: str =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Any =TFViTModel.from_pretrained("google/vit-base-patch16-224")
self.assertIsNotNone(UpperCAmelCase_)
def lowerCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Any =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[str] =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
lowerCamelCase__: List[str] =self.default_image_processor
lowerCamelCase__: Union[str, Any] =prepare_img()
lowerCamelCase__: List[Any] =image_processor(images=UpperCAmelCase_ , return_tensors="tf")
# forward pass
lowerCamelCase__: Dict =model(**UpperCAmelCase_)
# verify the logits
lowerCamelCase__: List[Any] =tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: List[Any] =tf.constant([-0.2744, 0.8215, -0.0836])
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4)
| 273
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _snake_case ( UpperCamelCase : Tuple ):
UpperCAmelCase : List[Any] = SwinConfig(image_size=192 )
if "base" in model_name:
UpperCAmelCase : str = 6
UpperCAmelCase : List[Any] = 128
UpperCAmelCase : Any = (2, 2, 18, 2)
UpperCAmelCase : int = (4, 8, 16, 32)
elif "large" in model_name:
UpperCAmelCase : str = 12
UpperCAmelCase : Union[str, Any] = 192
UpperCAmelCase : Dict = (2, 2, 18, 2)
UpperCAmelCase : Union[str, Any] = (6, 12, 24, 48)
else:
raise ValueError("""Model not supported, only supports base and large variants""" )
UpperCAmelCase : Tuple = window_size
UpperCAmelCase : List[Any] = embed_dim
UpperCAmelCase : List[str] = depths
UpperCAmelCase : Union[str, Any] = num_heads
return config
def _snake_case ( UpperCamelCase : List[Any] ):
if "encoder.mask_token" in name:
UpperCAmelCase : Optional[int] = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
if "encoder.patch_embed.proj" in name:
UpperCAmelCase : int = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "encoder.patch_embed.norm" in name:
UpperCAmelCase : Optional[int] = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
if "attn.proj" in name:
UpperCAmelCase : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCAmelCase : List[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCAmelCase : List[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCAmelCase : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCAmelCase : int = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
UpperCAmelCase : int = """layernorm.weight"""
if name == "encoder.norm.bias":
UpperCAmelCase : str = """layernorm.bias"""
if "decoder" in name:
pass
else:
UpperCAmelCase : Union[str, Any] = """swin.""" + name
return name
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Union[str, Any] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
UpperCAmelCase : Tuple = key.split(""".""" )
UpperCAmelCase : Dict = int(key_split[2] )
UpperCAmelCase : Optional[int] = int(key_split[4] )
UpperCAmelCase : Dict = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase : Dict = val[:dim, :]
UpperCAmelCase : Any = val[
dim : dim * 2, :
]
UpperCAmelCase : Optional[Any] = val[-dim:, :]
else:
UpperCAmelCase : List[str] = val[
:dim
]
UpperCAmelCase : Dict = val[
dim : dim * 2
]
UpperCAmelCase : List[str] = val[
-dim:
]
else:
UpperCAmelCase : List[str] = val
return orig_state_dict
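# A standalone sketch of the fused-qkv split performed in the loop above: one
# (3*dim, dim) projection is cut into equal query/key/value blocks (dim=4 is
# an arbitrary demo size, not a value from the checkpoints).
if __name__ == "__main__":
    demo_dim = 4
    qkv = torch.arange(3 * demo_dim * demo_dim, dtype=torch.float32).reshape(3 * demo_dim, demo_dim)
    q, k, v = qkv[:demo_dim, :], qkv[demo_dim : demo_dim * 2, :], qkv[-demo_dim:, :]
    assert q.shape == k.shape == v.shape == (demo_dim, demo_dim)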
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] ):
UpperCAmelCase : Any = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
UpperCAmelCase : Optional[Any] = get_swin_config(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase : Dict = SwinForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase : str = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = ViTImageProcessor(size={"""height""": 192, """width""": 192} )
UpperCAmelCase : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
UpperCAmelCase : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
with torch.no_grad():
UpperCAmelCase : List[Any] = model(**SCREAMING_SNAKE_CASE_ ).logits
print(outputs.keys() )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print(F"Pushing model and image processor for {model_name} to hub" )
model.push_to_hub(F"microsoft/{model_name}" )
image_processor.push_to_hub(F"microsoft/{model_name}" )
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A: Union[str, Any] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 109
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'gpt_neox_japanese'
def __init__( self , lowercase=32000 , lowercase=2560 , lowercase=32 , lowercase=32 , lowercase=4 , lowercase="gelu" , lowercase=1.00 , lowercase=10000 , lowercase=2048 , lowercase=0.02 , lowercase=1e-5 , lowercase=True , lowercase=31996 , lowercase=31999 , lowercase=0.1 , lowercase=0.0 , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_multiple_size
A__ = hidden_act
A__ = rotary_pct
A__ = rotary_emb_base
A__ = initializer_range
A__ = layer_norm_eps
A__ = use_cache
A__ = attention_dropout
A__ = hidden_dropout
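# A hedged instantiation sketch for the config above, assuming `transformers`
# exposes it as GPTNeoXJapaneseConfig (the kwargs mirror the signature's
# defaults).
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseConfig

    cfg = GPTNeoXJapaneseConfig(hidden_size=2560, num_hidden_layers=32)
    print(cfg.hidden_size, cfg.use_cache)  # 2560 True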
| 68
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = data
def __iter__( self : int):
"""simple docstring"""
for element in self.data:
yield element
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase=True ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = Accelerator(even_batches=__lowerCAmelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ) -> List[Any]:
'''simple docstring'''
if iterable:
lowercase_ = DummyIterableDataset(torch.as_tensor(range(__lowerCAmelCase ) ) )
else:
lowercase_ = TensorDataset(torch.as_tensor(range(__lowerCAmelCase ) ) )
lowercase_ = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase )
lowercase_ = accelerator.prepare(__lowerCAmelCase )
return dl
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> Dict:
'''simple docstring'''
lowercase_ = create_dataloader(accelerator=__lowerCAmelCase , dataset_size=__lowerCAmelCase , batch_size=__lowerCAmelCase )
lowercase_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _SCREAMING_SNAKE_CASE () -> Optional[Any]:
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCAmelCase )
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _SCREAMING_SNAKE_CASE () -> Tuple:
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCAmelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCAmelCase )
lowercase_ = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
lowercase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCAmelCase ):
lowercase_ = ddp_model(batch[0].float() )
lowercase_ = output.sum()
loss.backward()
batch_idxs.append(__lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
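# Note (added): this script asserts `accelerator.num_processes == 2`, so it is
# meant to be launched on two processes. Assuming the file is saved as
# test_join.py (illustrative name, not from the original source), something like:
#   accelerate launch --num_processes 2 test_join.py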
| 313
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , lowerCAmelCase_ : int = 6):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
self.create_linked_list(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = current_node
lowercase_ = current_node
for _ in range(1 , lowerCAmelCase_):
lowercase_ = Node()
lowercase_ = current_node
lowercase_ = previous_node
lowercase_ = current_node
lowercase_ = self.front
lowercase_ = previous_node
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Any):
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase_ = self.rear.next
if self.rear:
lowercase_ = data
def _UpperCAmelCase ( self : str):
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase_ = self.front.data
lowercase_ = None
return data
lowercase_ = self.front
lowercase_ = old_front.next
lowercase_ = old_front.data
lowercase_ = None
return data
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""")
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str]):
"""simple docstring"""
lowercase_ = None
lowercase_ = None
lowercase_ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
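
# --- Illustrative usage (added example, not part of the original module) ---
# Enqueue fills the fixed-capacity ring; dequeue frees slots for reuse.
if __name__ == "__main__":
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # a
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b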
| 313
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
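# Illustrative usage sketch (added example, shown as comments because this
# module uses relative imports and cannot be run standalone):
#
#   from transformers import LukeConfig
#   config = LukeConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
#   print(config.hidden_size)  # 128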
| 146
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and save up to `max_images` results to disk."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # URLs come double-escaped out of the page source, so decode twice
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 146
| 1
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the item in the bucket; return False if it is occupied by another key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 362
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
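# Illustrative invocation (added note; the script file name below is an
# assumption, not taken from the original source):
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16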
| 247
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
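# Illustrative usage sketch (added example, shown as comments because this
# module uses relative imports). `downloaded` and `expected` are hypothetical
# dicts mapping URLs to local paths / expected size-checksum records:
#
#   recorded = {url: get_size_checksum_dict(path) for url, path in downloaded.items()}
#   verify_checksums(expected, recorded, verification_name="downloaded files")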
| 325
|
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """In-place DP: each cell accumulates the cheapest cost of reaching it
    from the top-left corner, moving only right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
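
# --- Illustrative usage (added example) --- the classic 3x3 grid: the cheapest
# top-left -> bottom-right path is 1 + 3 + 1 + 1 + 1 = 7.
if __name__ == "__main__":
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7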
| 325
| 1
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the `glue` dataset from the given fold indices."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
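# Illustrative sketch (added example, shown as comments; labels are made up):
# how StratifiedKFold yields the per-fold index arrays that
# get_fold_dataloaders consumes, preserving class balance in every fold.
#
#   import numpy as np
#   from sklearn.model_selection import StratifiedKFold
#
#   labels = np.array([0, 1, 0, 1, 0, 1])
#   for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(np.zeros(len(labels)), labels):
#       print(train_idxs, valid_idxs)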
| 184
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184
| 1
|
def is_unique(input_str: str) -> bool:
    """Determine whether a string consists of all-unique characters by keeping
    a bitmap of the code points seen so far."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
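
# --- Illustrative usage (added example) ---
if __name__ == "__main__":
    print(is_unique("abcdef"))  # True
    print(is_unique("abcdea"))  # False: 'a' appears twice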
| 300
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Calculate whichever one of voltage, current, or power is passed as 0,
    given the other two (P = V * I)."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
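
# --- Illustrative usage (added example) --- solving for voltage from
# power and current: V = P / I = 5 / 2 = 2.5.
if __name__ == "__main__":
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)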
| 273
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 361
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_stable_diffusion_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 58
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """
    Create a simple DataLoader to use during the test cases
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """
    A helper function for verifying the batch sizes coming from a prepared dataloader in each process
    """
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 313
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
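# Illustrative usage sketch (added example, shown as comments because this
# module uses relative imports): two-stage Deformable DETR requires box
# refinement, which the constructor enforces.
#
#   from transformers import DeformableDetrConfig
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   print(config.num_attention_heads, config.hidden_size)  # 8 256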
| 313
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 363
|
"""simple docstring"""
import baseaa
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8" ) )
def a__ ( SCREAMING_SNAKE_CASE : bytes ):
'''simple docstring'''
return baseaa.baadecode(SCREAMING_SNAKE_CASE ).decode("utf-8" )
if __name__ == "__main__":
lowerCAmelCase__ = '''Hello World!'''
lowerCAmelCase__ = baseaa_encode(test)
print(encoded)
lowerCAmelCase__ = baseaa_decode(encoded)
print(decoded)
| 133
| 0
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class BatchSamplerShardTest(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCamelCase__ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
lowerCamelCase__ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
lowerCamelCase__ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
# Check the shards when the dataset is very small.
lowerCamelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
lowerCamelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [[], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCamelCase__ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCamelCase__ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
# Check the shards when the dataset is very small.
lowerCamelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [[], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCamelCase__ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
lowerCamelCase__ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
lowerCamelCase__ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is very small.
lowerCamelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ )
lowerCamelCase__ = [[], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCamelCase__ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCamelCase__ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is very small.
lowerCamelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
lowerCamelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ )
lowerCamelCase__ = [[], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
    def test_batch_sampler_with_varying_batch_sizes(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size.
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
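# Minimal standalone demo (illustrative, assuming accelerate is installed):
# eight samples, batch size 2, two processes - each shard sees every other batch.
if __name__ == "__main__":
    demo_sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
    demo_shards = [BatchSamplerShard(demo_sampler, 2, i) for i in range(2)]
    assert [list(s) for s in demo_shards] == [[[0, 1], [4, 5]], [[2, 3], [6, 7]]]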
| 209
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features
    def forward(self, inputs):
        return self.model.generate(inputs=inputs)
    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
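# Hedged usage sketch (downloads openai/whisper-base on first call; `audio` is a
# raw waveform array of the kind WhisperProcessor accepts):
#
#     tool = SpeechToTextTool()
#     transcript = tool(audio)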
| 247
| 0
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(f"{name.ljust(10)} Recall: {recall * 100:.2f}  Precision: {precision * 100:.2f}  F1: {f1 * 100:.2f}")
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=allmetrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span,
        )
        return score
| 257
|
from ..utils import DummyObject, requires_backends
# NOTE: the pipeline name below is reconstructed; the structure follows the
# diffusers dummy-object convention for missing optional backends.
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
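# Illustrative standalone sketch (not the diffusers implementation): a dummy
# placeholder class like the one above fails loudly at *use* time when optional
# backends are missing, instead of breaking imports.
class _DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires backends {cls._backends}")
class _FakePipeline(metaclass=_DummyMeta):
    _backends = ["transformers", "torch", "note_seq"]
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires backends {self._backends}")
try:
    _FakePipeline()
except ImportError as err:
    assert "note_seq" in str(err)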
| 257
| 1
|
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"
    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
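# Hedged usage example (requires transformers; illustrative, not part of the
# original file): keyword overrides become attributes, everything else keeps
# the defaults from the signature above.
if __name__ == "__main__":
    config = BertGenerationConfig(hidden_size=128, num_hidden_layers=2)
    assert config.hidden_size == 128 and config.model_type == "bert-generation"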
| 184
|
class MaxFenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and
    range-maximum queries over half-open intervals [left, right)."""
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # tree[i] = max of arr[get_prev(i) + 1 .. i]
    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)
    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1
    def update(self, index: int, value: int) -> None:
        # Set arr[index] = value in O(log^2 N). Each covering node is recomputed
        # with a range query so that lowering a value also works.
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers only the updated position.
                self.tree[index] = value
            else:
                # Recompute the max over this node's range [border, index].
                self.tree[index] = self.query(current_left_border, index + 1)
            index = self.get_next(index)
    def query(self, left: int, right: int) -> int:
        # Maximum over [left, right) in O(log^2 N).
        right -= 1  # because the right border is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
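# Worked example: point updates combined with range-maximum queries over the
# half-open interval [left, right); lowering a value works because update()
# re-queries each covering node.
if __name__ == "__main__":
    fenwick = MaxFenwickTree(8)
    fenwick.update(2, 20)
    fenwick.update(5, 10)
    assert fenwick.query(0, 8) == 20
    assert fenwick.query(3, 8) == 10
    fenwick.update(2, 0)
    assert fenwick.query(0, 8) == 10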
| 184
| 1
|
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
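# Hedged usage sketch: a test in the same suite can request the fixture above
# and load the generated builder script (the URLs inside the dummy script need
# network access):
#
#     def test_dummy_dataset_loads(dataset_loading_script_dir):
#         import datasets
#         ds = datasets.load_dataset(dataset_loading_script_dir, split="train")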
| 130
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16_384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16_384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 130
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
def __lowerCAmelCase ( self : int ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase__ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCAmelCase__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowerCamelCase__ ,lowerCamelCase__ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ ,lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase__ ,legacy_format=lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ ,lowerCamelCase__ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ ,lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase__ ,legacy_format=lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ ,lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] ,250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] ,250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] ,250_020 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) ,[250_026, 250_001] )
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt',
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['input_ids']
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                'input_ids': [[62, 3_034, 2, 250_004]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 250_001,
            },
        )
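# Hedged usage sketch (downloads the checkpoint on first use): src_lang/tgt_lang
# select the language codes the tokenizer appends, which is exactly what the
# integration tests above assert.
#
#     tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     batch = tok(["A test"], text_target=["Un test"], return_tensors="pt")
#     # encoder input_ids end with [eos, en_XX]; labels end with [eos, ro_RO]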
| 98
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFPegasusForConditionalGeneration,
            'feature-extraction': TFPegasusModel,
            'summarization': TFPegasusForConditionalGeneration,
            'text2text-generation': TFPegasusForConditionalGeneration,
            'translation': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU); the 1.702 factor makes it a GELU approximation."""
    return vector * sigmoid(1.702 * vector)
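# Illustrative check (not part of the original file): sigmoid_linear_unit(np.array([0.0]))
# returns array([0.]) because sigmoid(0) = 0.5 and 0 * 0.5 = 0.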
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
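# Example invocation (file names illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --learning_rate 3e-5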
def main():
    args = parse_args()

    # Import the training script as a module so we can hand its _mp_fn to xmp.spawn.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
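    # The keys above mirror KandinskyInpaintPipeline's constructor arguments, so the test
    # below can splat them directly via self.pipeline_class(**components).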
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask: ones everywhere, with a zeroed corner marking the region to inpaint
        # (the exact indices were lost in extraction; the reference test zeroes the top-left quarter)
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # Zero out the band where the hat is painted (region taken from the reference test).
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline: predicts a per-pixel depth map for an input image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # The three dicts route kwargs to preprocess, _forward and postprocess respectively;
        # this pipeline accepts no extra parameters, so all three stay empty.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Upsample the raw depth map back to the original image resolution (PIL size is (W, H), hence [::-1]).
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
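# Minimal usage sketch (checkpoint name illustrative; any depth-estimation model works):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor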
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
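# Illustrative usage (not part of the original file): RealmConfig() reproduces the
# google/realm-cc-news-pretrained-* architecture, and overrides work as usual:
#   config = RealmConfig(num_candidates=2)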
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate (fc1 feeds the intermediate layer, fc2 the output layer)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
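    # Token layout note: in default mode the language code precedes the text ([lang_code] X [eos]);
    # with legacy_behaviour=True it trails the eos ([eos, lang_code]), matching the original NLLB release.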
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
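# Worked example (illustrative): with offset 0,
#   replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
# returns "poolformer.encoder.block.2.0.output.conv1.weight".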
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f'Converting model {model_name}...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')

    # verify logits; atol=1e-2 tolerates small numeric drift between frameworks
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
blip_test_file = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
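    # METRIC_INNER_PRODUCT ranks by dot product, so a query of all ones scores the all-twos
    # "bar" embedding highest; the retrieve() assertions below rely on this ordering.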
def snake_case ( self : List[str] ):
lowercase__ : Union[str, Any] = self.get_dummy_dataset()
lowercase__ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Union[str, Any] = dataset
lowercase__ : List[str] = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : bool ):
lowercase__ : Union[str, Any] = self.get_dummy_dataset()
lowercase__ : Optional[int] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowercase__ : Any = os.path.join(self.tmpdirname , "dataset" )
lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowercase__ : Tuple = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase__ : Dict = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE ) , )
return retriever
def snake_case ( self : Tuple ):
lowercase__ : Optional[int] = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowercase__ : Optional[int] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowercase__ : List[str] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE , open(SCREAMING_SNAKE_CASE , "wb" ) )
lowercase__ : int = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowercase__ : Any = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def snake_case ( self : int ):
lowercase__ : Any = 1
lowercase__ : str = self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case ( self : str ):
lowercase__ : Dict = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowercase__ : Tuple = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowercase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self : str ):
lowercase__ : Union[str, Any] = 1
lowercase__ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowercase__ : str = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[Any] = 1
lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case ( self : List[str] ):
lowercase__ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : int = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowercase__ : Dict = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[Any] = 1
lowercase__ : List[str] = self.get_dummy_legacy_index_retriever()
lowercase__ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case ( self : Dict ):
lowercase__ : Optional[int] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowercase__ : str = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def snake_case ( self : Any ):
import torch
lowercase__ : List[Any] = 1
lowercase__ : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever()
lowercase__ : Tuple = [[5, 7], [10, 11]]
lowercase__ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowercase__ : int = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE )
context_input_ids , context_attention_mask , retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
lowercase__ : List[str] = retriever(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE , return_tensors="pt" , )
context_input_ids , context_attention_mask , retrieved_doc_embeds , doc_ids = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def snake_case ( self : int ):
lowercase__ : List[Any] = self.get_dpr_ctx_encoder_tokenizer()
lowercase__ : Optional[int] = 1
lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [[5, 7], [10, 11]]
lowercase__ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
lowercase__ : List[Any] = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(
len(SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , SCREAMING_SNAKE_CASE ) # check that the doc-token-related keys are present in the dictionary
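# The tests above all exercise the same retrieve() contract: given question hidden
# states, the retriever returns doc embeddings, doc ids, and doc dicts ranked by
# inner product. Below is a minimal sketch of just that ranking step, using only
# `datasets` and `faiss` (no RAG classes); the 4-dim toy vectors and expected ids
# mirror the dummy dataset above, so this is illustrative rather than the
# RagRetriever implementation itself.
import faiss
import numpy as np
from datasets import Dataset

vector_size = 4  # stands in for self.retrieval_vector_size
toy = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "title": ["Foo", "Bar"],
        "embeddings": [np.ones(vector_size), 2 * np.ones(vector_size)],
    }
)
toy.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

queries = np.array([np.ones(vector_size), -np.ones(vector_size)], dtype=np.float32)
scores, examples = toy.get_nearest_examples_batch("embeddings", queries, k=1)
print([ex["id"] for ex in examples])  # [['1'], ['0']] -- the same ranking the assertions above check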
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase: Union[str, Any] = logging.get_logger(__name__)
_lowercase: str = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _lowercase ( __UpperCamelCase ):
"""simple docstring"""
__A = "levit"
def __init__(self , lowerCamelCase_=224 , lowerCamelCase_=3 , lowerCamelCase_=3 , lowerCamelCase_=2 , lowerCamelCase_=1 , lowerCamelCase_=16 , lowerCamelCase_=[128, 256, 384] , lowerCamelCase_=[4, 8, 12] , lowerCamelCase_=[4, 4, 4] , lowerCamelCase_=[16, 16, 16] , lowerCamelCase_=0 , lowerCamelCase_=[2, 2, 2] , lowerCamelCase_=[2, 2, 2] , lowerCamelCase_=0.02 , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
a = image_size
a = num_channels
a = kernel_size
a = stride
a = padding
a = hidden_sizes
a = num_attention_heads
a = depths
a = key_dim
a = drop_path_rate
a = patch_size
a = attention_ratio
a = mlp_ratio
a = initializer_range
a = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _lowercase ( __UpperCamelCase ):
"""simple docstring"""
__A = version.parse("1.11" )
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return 1E-4
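# A minimal usage sketch for the config above. `LevitConfig` and `LevitModel` are
# the upstream transformers names this obfuscated class appears to correspond to
# (an assumption based on the "levit" model type); all weights are randomly
# initialised, so this only demonstrates construction.
from transformers import LevitConfig, LevitModel

levit_config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12])
levit_model = LevitModel(levit_config)
print(levit_model.config.hidden_sizes)  # [128, 256, 384]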
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowercase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A = False
__A = False
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ):
"""simple docstring"""
a = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
a = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=512 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ):
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
a = embedding_size
def UpperCamelCase_ (self ):
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertModel(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
a = [input_ids, input_mask]
a = model(lowerCamelCase_ )
a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForMaskedLM(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForNextSentencePrediction(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForPreTraining(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_labels
a = TFMobileBertForSequenceClassification(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_choices
a = TFMobileBertForMultipleChoice(config=lowerCamelCase_ )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_labels
a = TFMobileBertForTokenClassification(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForQuestionAnswering(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ (self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def UpperCamelCase_ (self ):
"""simple docstring"""
a = TFMobileBertModelTest.TFMobileBertModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
a = TFMobileBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
a = tf.constant([[0, 1, 2, 3, 4, 5]] )
a = model(lowerCamelCase_ )[0]
a = [1, 6, 30522]
self.assertEqual(output.shape , lowerCamelCase_ )
a = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
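# The integration test above pins a 3x3 slice of the logits against hard-coded
# reference values with tf.debugging.assert_near. A model-free sketch of that
# assertion pattern (the tensor and tolerance below are made up for illustration):
import tensorflow as tf

logits = tf.random.normal((1, 6, 30522))
expected_slice = logits[:, :3, :3] + 5e-5  # pretend these were the recorded reference values
tf.debugging.assert_near(logits[:, :3, :3], expected_slice, atol=1e-4)  # diff < atol, so no error raised
print("slice check passed")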
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[int] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self : str ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : int=False , UpperCamelCase__ : Tuple=False , ) -> Dict:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__magic_name__ = np.array([re.sub(UpperCamelCase__ , """""" , UpperCamelCase__ ) for x in predictions] )
__magic_name__ = np.array([re.sub(UpperCamelCase__ , """""" , UpperCamelCase__ ) for x in references] )
else:
__magic_name__ = np.asarray(UpperCamelCase__ )
__magic_name__ = np.asarray(UpperCamelCase__ )
if ignore_case:
__magic_name__ = np.char.lower(UpperCamelCase__ )
__magic_name__ = np.char.lower(UpperCamelCase__ )
if ignore_punctuation:
__magic_name__ = string.punctuation.maketrans("""""" , """""" , string.punctuation )
__magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
__magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
if ignore_numbers:
__magic_name__ = string.digits.maketrans("""""" , """""" , string.digits )
__magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
__magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
__magic_name__ = predictions == references
return {"exact_match": np.mean(UpperCamelCase__ ) * 100}
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase__ :
def __init__( self , lowercase , ) -> Union[str, Any]:
__UpperCamelCase = parent
__UpperCamelCase = 1_3
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 9_9
__UpperCamelCase = 3_2
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 3_7
__UpperCamelCase = """gelu"""
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 5_1_2
__UpperCamelCase = 1_6
__UpperCamelCase = 2
__UpperCamelCase = 0.02
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = None
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = TFDistilBertModel(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase )
__UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForSequenceClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFDistilBertForMultipleChoice(lowercase )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForTokenClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ) -> Dict:
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = TFDistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 )
def __lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )
@slow
def __lowerCamelCase ( self ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase = model(lowercase )[0]
__UpperCamelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowercase )
__UpperCamelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
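# The multiple-choice tests above tile each (batch, seq_len) input into
# (batch, num_choices, seq_len) before feeding the model -- the same sequence is
# repeated once per answer choice. A minimal sketch of that tiling on its own
# (the shape values are arbitrary toy numbers):
import tensorflow as tf

batch_size, seq_length, num_choices = 2, 5, 4
input_ids = tf.random.uniform((batch_size, seq_length), maxval=99, dtype=tf.int32)
tiled = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
print(tiled.shape)  # (2, 4, 5)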
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[str] =LxmertTokenizer
lowerCamelCase : Tuple =LxmertTokenizerFast
lowerCamelCase : Dict =True
lowerCamelCase : Tuple =True
def __a ( self ) -> Any:
super().setUp()
a : Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
a : int = "UNwant\u00E9d,running"
a : Optional[Any] = "unwanted, running"
return input_text, output_text
def __a ( self ) -> List[str]:
a : Union[str, Any] = self.tokenizer_class(self.vocab_file )
a : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCAmelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __a ( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
a : str = self.get_tokenizer()
a : List[str] = self.get_rust_tokenizer()
a : str = "I was born in 92000, and this is falsé."
a : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
a : Optional[int] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a : Dict = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = self.get_rust_tokenizer()
a : int = tokenizer.encode(lowerCAmelCase__ )
a : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
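# A standalone sketch of the WordPiece behaviour the test above asserts: write the
# toy vocab to disk, load the slow tokenizer, and tokenize the accented input.
# Lowercasing and accent stripping run first, then WordPiece splits on the vocab.
import os
import tempfile

from transformers import LxmertTokenizer

vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write("".join(tok + "\n" for tok in vocab_tokens))
    tokenizer = LxmertTokenizer(vocab_file)
    print(tokenizer.tokenize("UNwant\u00E9d,running"))  # ['un', '##want', '##ed', ',', 'runn', '##ing']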
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( _lowercase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(_lowercase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __UpperCamelCase :
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
pass
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> str:
pass
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
a : Dict = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict:
a : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]:
vision_model , text_model = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = {"vision_model": vision_model, "text_model": text_model}
a : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
vision_model , text_model = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = {"vision_model": vision_model, "text_model": text_model}
a : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
a : str = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : Dict = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : List[Any] = after_output[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> List[Any]:
vision_model , text_model = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = {"vision_model": vision_model, "text_model": text_model}
a : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : Tuple = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
a : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[int] = to_atuple(vision_model.config.image_size )
a : Tuple = to_atuple(vision_model.config.patch_size )
a : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a : str = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
a : List[Any] = inputs_dict
a : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a : int = pt_model(**lowerCAmelCase__ ).to_tuple()
a : Union[str, Any] = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
a : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
a : Optional[int] = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
a : int = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4E-2 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
a : List[str] = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : int = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def __a ( self ) -> Any:
a : List[Any] = self.prepare_config_and_inputs()
a : Tuple = config_inputs_dict.pop("vision_config" )
a : int = config_inputs_dict.pop("text_config" )
a : List[str] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __a ( self ) -> List[Any]:
model_a , inputs = self.get_pretrained_model_and_inputs()
a : Optional[int] = model_a(**lowerCAmelCase__ )
a : Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
a : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : str = model_a(**lowerCAmelCase__ )
a : Dict = after_outputs[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-5 )
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Any = 13
a : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Optional[Any] = random_attention_mask([batch_size, 4] )
a : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Dict = FlaxViTModel(lowerCAmelCase__ )
a : Dict = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> str:
vit_model_tester = FlaxViTModelTester(self )
bert_model_tester = FlaxBertModelTester(self )
vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
vision_config , pixel_values = vision_config_and_inputs
text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Tuple = 13
a : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Tuple = random_attention_mask([batch_size, 4] )
a : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : List[Any] = FlaxCLIPVisionModel(lowerCAmelCase__ )
a : Tuple = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> List[Any]:
clip_model_tester = FlaxCLIPVisionModelTester(self )
bert_model_tester = FlaxBertModelTester(self )
vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
vision_config , pixel_values = vision_config_and_inputs
text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Dict:
a : str = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
a : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
a : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" )
a : Optional[Any] = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a : List[str] = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 ) )
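# A worked example of the sequence-length arithmetic used in the attention-shape
# checks above: a ViT-style encoder sees num_patches + 1 tokens, the +1 being the
# [CLS] token. The 30x30 image and 2x2 patch sizes are hypothetical tiny-model values.
image_size = (30, 30)
patch_size = (2, 2)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
print(num_patches, seq_len)  # 225 226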
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__ ( lowercase ):
lowercase__ = ["""image_processor""", """tokenizer"""]
lowercase__ = """ViTImageProcessor"""
lowercase__ = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Optional[int] ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Optional[Any]=None ,**lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,lowerCamelCase__ ,)
_UpperCamelCase : List[Any] = kwargs.pop('feature_extractor' )
_UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase__ ,lowerCamelCase__ )
def __call__( self : List[Any] ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Any=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : List[Any]=None ,**lowerCamelCase__ : List[str] ):
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
_UpperCamelCase : Optional[int] = self.tokenizer(lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
if visual_prompt is not None:
_UpperCamelCase : Any = self.image_processor(lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
if images is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
if visual_prompt is not None and images is not None:
_UpperCamelCase : Dict = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_UpperCamelCase : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_UpperCamelCase : Dict = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) ,tensor_type=lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : int ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ )
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,lowerCamelCase__ ,)
return self.image_processor_class
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,lowerCamelCase__ ,)
return self.image_processor
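# A usage sketch for the processor above. The concrete upstream class appears to be
# CLIPSegProcessor (an assumption based on the ViT image processor / CLIP tokenizer
# pair and the `conditional_pixel_values` key); the checkpoint name below is one
# published CLIPSeg checkpoint and is illustrative only.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
prompt = Image.fromarray(np.full((224, 224, 3), 255, dtype=np.uint8))

encoding = processor(text=["a cat"], images=image, return_tensors="pt")  # text + image path
print(sorted(encoding.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']

encoding = processor(visual_prompt=prompt, images=image, return_tensors="pt")  # prompt + image path
print(sorted(encoding.keys()))  # ['conditional_pixel_values', 'pixel_values']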
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __a :
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps: one map per requested stage
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels double at each stage transition
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify that requesting an unknown stage raises a ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
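# Quick sanity check of the shape arithmetic used by the tester above, run on the
# tester defaults (a standalone sketch added for illustration, not an original test):
if __name__ == "__main__":
    image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
    # tokens after patch embedding, merged 2x2 (i.e. 4x fewer tokens) at each stage transition
    expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
    # channel width doubles at each stage transition
    expected_dim = embed_dim * 2 ** (len(depths) - 1)
    assert (expected_seq_len, expected_dim) == (16, 64)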
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaN is the only value that compares unequal to itself
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertEqual(len(outputs.feature_maps), len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
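# A tiny end-to-end sketch of the backbone contract exercised above, reusing the
# tester's default configuration (illustration only; assumes a local torch install):
if __name__ == "__main__":
    config = MaskFormerSwinConfig(
        image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2,
        out_features=["stage1", "stage2", "stage3"],
    )
    backbone = MaskFormerSwinBackbone(config)
    backbone.eval()
    with torch.no_grad():
        outputs = backbone(torch.randn(1, 3, 32, 32))
    # one feature map per requested stage, channels doubling per stage:
    # [(1, 16, 16, 16), (1, 32, 8, 8), (1, 64, 4, 4)]
    print([tuple(f.shape) for f in outputs.feature_maps])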
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : List[str] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
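# The refinenet rule above flips the stage index: the original DPT counts refinenets
# from the deepest feature while the HF fusion stage counts from the shallowest.
# A quick self-check of the mapping and of one full key rename (added for illustration):
if __name__ == "__main__":
    assert {i: abs(i - 4) for i in range(1, 5)} == {1: 3, 2: 2, 3: 1, 4: 0}
    assert rename_key("scratch.refinenet4.out_conv.weight") == "neck.fusion_stage.layers.0.projection.weight"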
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
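# How the fused qkv matrix is split in read_in_q_k_v above, shown on a toy tensor
# (hypothetical sizes; added for illustration): first third is query, then key, then value.
if __name__ == "__main__":
    hidden_size = 4
    in_proj_weight = torch.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value]), in_proj_weight)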
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
_lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
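# Example invocation (assuming this script is saved as convert_dpt_to_pytorch.py;
# the checkpoint URL is the argparse default, the output directory is hypothetical):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large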
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (the system is homogeneous, so the origin solves it)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial solution (consistent system)
    return (x, y)
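if __name__ == "__main__":
    # Worked example (added for illustration): x + 2y = 7 and 3x - y = 0 give
    # determinant = -7, determinant_x = -7, determinant_y = -21, so (x, y) = (1.0, 3.0).
    assert cramers_rule_2x2([1, 2, 7], [3, -1, 0]) == (1.0, 3.0)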
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : int =None
if self.use_input_mask:
__UpperCamelCase : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : Optional[Any] =None
if self.use_labels:
__UpperCamelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : str =self.get_config()
return config, input_ids, input_mask, token_labels
def __lowercase ( self ):
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.prepare_config_and_inputs()
__UpperCamelCase : str =True
return config, input_ids, input_mask, token_labels
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : str =GPTNeoXModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__UpperCamelCase : Union[str, Any] =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
__UpperCamelCase : Any =model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : str =True
__UpperCamelCase : Optional[Any] =GPTNeoXModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__UpperCamelCase : Union[str, Any] =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : str =GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__UpperCamelCase : Union[str, Any] =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : int =self.num_labels
__UpperCamelCase : Any =GPTNeoXForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__UpperCamelCase : Any =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Any =self.num_labels
__UpperCamelCase : Optional[Any] =GPTNeoXForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__UpperCamelCase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Dict =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.num_labels
__UpperCamelCase : Union[str, Any] =GPTNeoXForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__UpperCamelCase : Any =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =True
__UpperCamelCase : str =GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# first forward pass
__UpperCamelCase : Optional[Any] =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__UpperCamelCase : str =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCamelCase : int =ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase : Optional[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCamelCase : List[Any] =torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Optional[int] =torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase : int =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
__UpperCamelCase : Tuple =output_from_no_past['hidden_states'][0]
__UpperCamelCase : Any =model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0]
# select random slice
__UpperCamelCase : Optional[int] =ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Dict =output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase : Union[str, Any] =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.prepare_config_and_inputs()
__UpperCamelCase : Any =config_and_inputs
__UpperCamelCase : str ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
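# The past-key-values test above verifies that continuing generation from a cache
# matches a from-scratch forward pass over the concatenated sequence. In miniature
# (illustrative pseudo-snippet, not an executable test):
#
#   out_full = model(torch.cat([input_ids, next_tokens], dim=-1), output_hidden_states=True)
#   out_cached = model(next_tokens, past_key_values=past, output_hidden_states=True)
#   torch.allclose(
#       out_full.hidden_states[0][:, -next_tokens.shape[1]:],
#       out_cached.hidden_states[0],
#       atol=1e-3,
#   )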
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
def __lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =self.model_tester.prepare_config_and_inputs_for_decoder()
__UpperCamelCase : Dict =None
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_UpperCAmelCase )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def __lowercase ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Tuple =ids_tensor([1, 10] , config.vocab_size )
__UpperCamelCase : Tuple =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Union[str, Any] =GPTNeoXModel(_UpperCAmelCase )
original_model.to(_UpperCAmelCase )
original_model.eval()
__UpperCamelCase : Tuple =original_model(_UpperCAmelCase ).last_hidden_state
__UpperCamelCase : Optional[int] =original_model(_UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Union[str, Any] ={'type': scaling_type, 'factor': 10.0}
__UpperCamelCase : int =GPTNeoXModel(_UpperCAmelCase )
scaled_model.to(_UpperCAmelCase )
scaled_model.eval()
__UpperCamelCase : List[str] =scaled_model(_UpperCAmelCase ).last_hidden_state
__UpperCamelCase : Any =scaled_model(_UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped')
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped')

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer('My favorite food is', return_tensors='pt').to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
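# A minimal sketch of the rope_scaling override exercised by the parameterized test
# above (added for illustration; the factor mirrors the test's value of 10.0):
if __name__ == "__main__":
    config = GPTNeoXConfig(hidden_size=64, num_attention_heads=8)
    config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", "factor": 10.0}
    print(config.rope_scaling)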
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')

        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        'score': ANY(float),
                        'label': ANY(str),
                        'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                    },
                )
@require_tf
@unittest.skip('Object detection not implemented in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__A : Any = AutoModelForObjectDetection.from_pretrained(_UpperCAmelCase)
__A : int = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase)
__A : Any = ObjectDetectionPipeline(model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase)
__A : Dict = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
__A : Optional[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = 'facebook/detr-resnet-50'
__A : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(_UpperCAmelCase)
__A : Tuple = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase)
__A : List[Any] = ObjectDetectionPipeline(model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase)
__A : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
__A : Tuple = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
])
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = 'facebook/detr-resnet-50'
__A : str = pipeline('object-detection' , model=_UpperCAmelCase)
__A : Any = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
__A : str = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
])
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = 0.9985
__A : List[Any] = 'facebook/detr-resnet-50'
__A : List[str] = pipeline('object-detection' , model=_UpperCAmelCase)
__A : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=_UpperCAmelCase)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = 'Narsil/layoutlmv3-finetuned-funsd'
__A : Tuple = 0.9993
__A : str = pipeline('object-detection' , model=_UpperCAmelCase , threshold=_UpperCAmelCase)
__A : Optional[int] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
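# Minimal sketch of the high-level pipeline API exercised above (downloads a
# checkpoint, so shown as a comment; illustration only):
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, ...]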
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            # round the mapped intensity using its fractional part
            self.rem = last % 1
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # resolve the sample image relative to this file's directory (fixed: was os.path.basename)
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
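# The same histogram-equalization mapping condensed into pure NumPy (an illustrative
# sketch with a hypothetical helper name; cv2/matplotlib above are only used for I/O):
def equalize_numpy(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()  # running probability s_k, as in stretch() above
    mapping = np.rint((levels - 1) * cdf).astype(img.dtype)  # intensity look-up table
    return mapping[img]  # remap every pixel through the LUT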
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth style) combination, up to order 4
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
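# A minimal denoising-loop sketch for this scheduler, mirroring the standard
# diffusers scheduler API (`unet` is a hypothetical model; comment-only illustration):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t).sample
#       sample = scheduler.step(model_output, t, sample).prev_sample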
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[Any] = """markuplm"""
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1_024 , tag_pad_id=216 , subs_pad_id=1_001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
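# Usage sketch for the config above (instantiated through the upstream
# MarkupLMConfig entry point; keyword names match the __init__ signature):
# from transformers import MarkupLMConfig
# config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
# The XPath embedding tables are sized by max_xpath_tag_unit_embeddings (256)
# and max_xpath_subs_unit_embeddings (1024).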
| 43
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig ):
    """simple docstring"""
    model_type = "new-model"
if is_tf_available():
    class TFNewModel(TFBertModel ):
        """simple docstring"""
        config_class = NewModelConfig
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] ='bert-base-cased'
__UpperCamelCase : Union[str, Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] ='bert-base-cased'
__UpperCamelCase : Optional[int] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple =TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : str =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str =TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : str =TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Optional[int] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Any =TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Union[str, Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Any =TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Union[str, Any] =TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Union[str, Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : str =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : Union[str, Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : List[Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] =TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_probability
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__UpperCamelCase : Any =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str =TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : List[str] =TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
        __UpperCamelCase : str =TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 14410 )
def __lowercase ( self ):
"""simple docstring"""
        __UpperCamelCase : Optional[Any] =TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 14410 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] =copy.deepcopy(model.config )
__UpperCamelCase : Optional[Any] =['FunnelBaseModel']
__UpperCamelCase : Tuple =TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : List[Any] =TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
try:
            AutoConfig.register('new-model' , NewModelConfig )
            auto_classes =[
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFNewModel )
                    auto_class.register(NewModelConfig , TFNewModel )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFBertModel )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase : List[str] =BertModelTester(self ).get_config()
__UpperCamelCase : Optional[Any] =NewModelConfig(**tiny_config.to_dict() )
__UpperCamelCase : Dict =auto_class.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =auto_class.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __lowercase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , 'bert-base is not a local folder and is not a valid model identifier' ):
__UpperCamelCase : Dict =TFAutoModel.from_pretrained('bert-base' )
def __lowercase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            __UpperCamelCase : Union[str, Any] =TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
def __lowercase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
__UpperCamelCase : List[str] =TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def __lowercase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(lowerCamelCase__ , 'Use `from_pt=True` to load this model' ):
__UpperCamelCase : List[Any] =TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
__UpperCamelCase : Dict =TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__UpperCamelCase : Dict =TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
__UpperCamelCase : Union[str, Any] =TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 71
| 0
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
SCREAMING_SNAKE_CASE_ = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
SCREAMING_SNAKE_CASE_ = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil( images ):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil( images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
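# Quick round-trip sketch for numpy_to_pil above (shapes are illustrative):
# import numpy as np
# batch = np.random.rand(2, 8, 8, 3).astype('float32')  # values in [0, 1]
# pil_images = numpy_to_pil(batch)  # -> list of two 8x8 RGB PIL images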
| 355
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class( config_class ):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/" ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = "\n".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
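# Quick demo of the docstring pattern `_re_checkpoint` matches:
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]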
| 189
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( ProcessorMixin ):
    """simple docstring"""
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''CLIPImageProcessor'''
    tokenizer_class = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def lowerCAmelCase ( self : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] , *__UpperCAmelCase : Any , **__UpperCAmelCase : Any ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
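# Usage sketch (any ProcessorMixin subclass exposes from_pretrained; the
# checkpoint name below is illustrative only, not confirmed for this class):
# processor = _UpperCAmelCase.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
# batch = processor(text="a photo of a cat", images=pil_image, return_tensors="pt")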
| 79
|
'''simple docstring'''
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    '''simple docstring'''
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
lowerCamelCase_ = {'''a''', '''b''', '''c''', '''d''', '''e'''}
lowerCamelCase_ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
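    # Worked check: |A ∩ B| = 3 ({'c', 'd', 'e'}) and |A ∪ B| = 8, so the
    # printed value is 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 0.375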
| 79
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
__snake_case = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class __lowerCamelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'tapas'
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1024 , __UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-1_2 , __UpperCAmelCase=0 , __UpperCAmelCase=10.0 , __UpperCAmelCase=0 , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase=None , __UpperCAmelCase=1.0 , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase="ratio" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> List[str]:
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_sizes
_a = initializer_range
_a = layer_norm_eps
# Fine-tuning task hyperparameters
_a = positive_label_weight
_a = num_aggregation_labels
_a = aggregation_loss_weight
_a = use_answer_as_supervision
_a = answer_loss_importance
_a = use_normalized_answer_loss
_a = huber_loss_delta
_a = temperature
_a = aggregation_temperature
_a = use_gumbel_for_cells
_a = use_gumbel_for_aggregation
_a = average_approximation_function
_a = cell_selection_preference
_a = answer_loss_cutoff
_a = max_num_rows
_a = max_num_columns
_a = average_logits_per_cell
_a = select_one_column
_a = allow_empty_column_selection
_a = init_cell_selection_weights_to_zero
_a = reset_position_index_per_cell
_a = disable_per_token_loss
# Aggregation hyperparameters
_a = aggregation_labels
_a = no_aggregation_label_index
if isinstance(self.aggregation_labels , __UpperCAmelCase ):
_a = {int(__UpperCAmelCase ): v for k, v in aggregation_labels.items()}
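# Example of the cast above: a JSON-loaded mapping such as {"0": "NONE", "1": "SUM"}
# arrives with string keys, so the dict comprehension rebuilds it as
# {0: "NONE", 1: "SUM"}.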
| 351
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
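# Note on the lazy pattern above: importing this package stays cheap because
# _LazyModule defers the framework-specific submodule imports; e.g. touching
# ResNetModel is what actually triggers the torch-backed import.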
| 153
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A_ , '''image_mean'''))
self.assertTrue(hasattr(A_ , '''image_std'''))
self.assertTrue(hasattr(A_ , '''do_normalize'''))
self.assertTrue(hasattr(A_ , '''do_resize'''))
self.assertTrue(hasattr(A_ , '''size'''))
def UpperCAmelCase__ ( self : List[str]):
pass
def UpperCAmelCase__ ( self : Optional[int]):
# Initialize image_processor
lowerCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_)
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image)
# Test not batched input
lowerCAmelCase_ : Optional[Any] = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : str = image_processor(A_ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def UpperCAmelCase__ ( self : str):
# Initialize image_processor
lowerCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_ , numpify=A_)
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray)
# Test not batched input
lowerCAmelCase_ : str = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : str = image_processor(A_ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def UpperCAmelCase__ ( self : str):
# Initialize image_processor
lowerCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_ , torchify=A_)
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor)
# Test not batched input
lowerCAmelCase_ : Dict = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Dict = image_processor(A_ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 103
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 130
| 0
|
def base16_encode( data ):
    '''simple docstring'''
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data ):
    '''simple docstring'''
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
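# Round-trip sanity sketch for the two helpers above:
# base16_encode(b'Hello') == '48656C6C6F'
# base16_decode('48656C6C6F') == b'Hello'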
| 200
|
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
_lowercase_uppercase_re = re.compile(R"""([a-z\d])([A-Z])""")
_single_underscore_re = re.compile(R"""(?<!_)_(?!_)""")
_multiple_underscores_re = re.compile(R"""(_{2,})""")
_split_re = R"""^\w+(\.\w+)*$"""
INVALID_WINDOWS_CHARACTERS_IN_PATH = R"""<>:/\|?*"""
def camelcase_to_snakecase( name ):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(r'\1_\2' , name )
    name = _lowercase_uppercase_re.sub(r'\1_\2' , name )
    return name.lower()
def snakecase_to_camelcase( name ):
    '''simple docstring'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '' )
def filename_prefix_for_name( name ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split( name , split ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re , split ):
        raise ValueError(F'''Split name should match '{_split_re}' but got '{split}'.''' )
    return F'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split( dataset_name , split , data_dir , filetype_suffix=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir , prefix )
    return F'''{filepath}*'''
def filenames_for_dataset_split( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
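# Usage sketch for the helpers above (paths are illustrative):
# filenames_for_dataset_split('/data', 'squad', 'train', 'arrow', shard_lengths=[100, 100, 100])
# -> ['/data/squad-train-00000-of-00003.arrow',
#     '/data/squad-train-00001-of-00003.arrow',
#     '/data/squad-train-00002-of-00003.arrow']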
| 200
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling(TaskTemplate ):
    task: str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text"}
| 297
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
a : str = tmp_path / 'cache'
a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : str = tmp_path / 'cache'
a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Dict = features.copy() if features else default_expected_features
a : Union[str, Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : Tuple = tmp_path / 'cache'
a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
a : Optional[int] = features.copy() if features else default_expected_features
a : Dict = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCamelCase__ ( _A , _A ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
a : int = features.copy()
a : List[Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Dict = tmp_path / 'cache'
a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
_check_json_dataset(_A , _A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def lowerCamelCase__ ( _A , _A , _A ):
if issubclass(_A , _A ):
a : Optional[int] = jsonl_path
elif issubclass(_A , _A ):
a : Optional[int] = [jsonl_path]
a : List[str] = tmp_path / 'cache'
a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
def lowerCamelCase__ ( _A , _A , _A=("train",) ):
assert isinstance(_A , _A )
for split in splits:
a : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : List[Any] = features.copy() if features else default_expected_features
a : Any = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
if split:
a : Any = {split: jsonl_path}
else:
a : List[Any] = 'train'
a : List[str] = {'train': jsonl_path, 'test': jsonl_path}
a : List[Any] = tmp_path / 'cache'
a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
class a__:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
buffer.seek(0 )
a : List[str] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : List[Any] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def lowercase_ ( self : List[str] , __snake_case : str ):
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ):
a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
assert exported_content == original_content
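# Note on the compression round-trip above: fsspec's compression="infer"
# selects the gzip/bz2/xz codec from the file extension when reading back.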
| 297
| 1
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence: list[Any] ) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree( sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
UpperCAmelCase__ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
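    # Cross-check: a sequence of length n has 2**n subsequences, so the runs
    # above print 2**4 = 16 and 2**3 = 8 subsequences respectively.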
| 356
|
def catalan_numbers( upper_limit: int ) -> "list[int]":
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
UpperCAmelCase__ = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
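# Cross-check sketch: the DP above matches the closed form
# C(n) = binom(2n, n) / (n + 1); the first values are 1, 1, 2, 5, 14, 42.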
| 290
| 0
|
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """simple docstring"""
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( n: int = 1000 ) -> int:
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
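    # Sanity check sketch: the first Fibonacci number with three digits is
    # F(12) = 144, so solution(3) == 12.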
| 167
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase : Tuple =logging.get_logger(__name__)
lowerCamelCase : Optional[Any] ={
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __a ( PretrainedConfig ):
    model_type = '''conditional_detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
def __init__( self : str , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Any=3_00 , SCREAMING_SNAKE_CASE : Tuple=6 , SCREAMING_SNAKE_CASE : int=20_48 , SCREAMING_SNAKE_CASE : Union[str, Any]=8 , SCREAMING_SNAKE_CASE : int=6 , SCREAMING_SNAKE_CASE : Dict=20_48 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : Dict=0.0 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : int="relu" , SCREAMING_SNAKE_CASE : Any=2_56 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : int=0.0 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0_2 , SCREAMING_SNAKE_CASE : Optional[int]=1.0 , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : int="sine" , SCREAMING_SNAKE_CASE : str="resnet50" , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : Tuple=5 , SCREAMING_SNAKE_CASE : List[str]=2 , SCREAMING_SNAKE_CASE : List[str]=1 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=2 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : List[Any]=0.2_5 , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCamelCase__ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Union[str, Any] = backbone_config.get("model_type" )
UpperCamelCase__ : Tuple = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ : Any = config_class.from_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = use_timm_backbone
UpperCamelCase__ : List[Any] = backbone_config
UpperCamelCase__ : Tuple = num_channels
UpperCamelCase__ : List[Any] = num_queries
UpperCamelCase__ : Dict = d_model
UpperCamelCase__ : Any = encoder_ffn_dim
UpperCamelCase__ : List[str] = encoder_layers
UpperCamelCase__ : List[str] = encoder_attention_heads
UpperCamelCase__ : Optional[int] = decoder_ffn_dim
UpperCamelCase__ : str = decoder_layers
UpperCamelCase__ : Optional[Any] = decoder_attention_heads
UpperCamelCase__ : int = dropout
UpperCamelCase__ : Optional[int] = attention_dropout
UpperCamelCase__ : Any = activation_dropout
UpperCamelCase__ : int = activation_function
UpperCamelCase__ : int = init_std
UpperCamelCase__ : List[Any] = init_xavier_std
UpperCamelCase__ : List[str] = encoder_layerdrop
UpperCamelCase__ : List[Any] = decoder_layerdrop
UpperCamelCase__ : List[Any] = encoder_layers
UpperCamelCase__ : Optional[Any] = auxiliary_loss
UpperCamelCase__ : List[str] = position_embedding_type
UpperCamelCase__ : Optional[Any] = backbone
UpperCamelCase__ : Optional[int] = use_pretrained_backbone
UpperCamelCase__ : List[Any] = dilation
# Hungarian matcher
UpperCamelCase__ : List[str] = class_cost
UpperCamelCase__ : Union[str, Any] = bbox_cost
UpperCamelCase__ : int = giou_cost
# Loss coefficients
UpperCamelCase__ : str = mask_loss_coefficient
UpperCamelCase__ : List[Any] = dice_loss_coefficient
UpperCamelCase__ : int = cls_loss_coefficient
UpperCamelCase__ : Tuple = bbox_loss_coefficient
UpperCamelCase__ : Any = giou_loss_coefficient
UpperCamelCase__ : List[Any] = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return self.d_model
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Any = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase__ : List[str] = self.backbone_config.to_dict()
UpperCamelCase__ : int = self.__class__.model_type
return output
class __a ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def __lowercase ( self : Any ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return 1e-5
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return 12
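# Summary sketch of the ONNX export contract declared above: dynamic axes for
# pixel_values (batch, num_channels, height, width) and pixel_mask (batch),
# a validation tolerance of 1e-5, and a default opset of 12.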
| 189
| 0
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint , config ):
'''simple docstring'''
__lowerCAmelCase = checkpoint
__lowerCAmelCase = {}
__lowerCAmelCase = vae_state_dict["encoder.conv_in.weight"]
__lowerCAmelCase = vae_state_dict["encoder.conv_in.bias"]
__lowerCAmelCase = vae_state_dict["encoder.conv_out.weight"]
__lowerCAmelCase = vae_state_dict["encoder.conv_out.bias"]
__lowerCAmelCase = vae_state_dict["encoder.norm_out.weight"]
__lowerCAmelCase = vae_state_dict["encoder.norm_out.bias"]
__lowerCAmelCase = vae_state_dict["decoder.conv_in.weight"]
__lowerCAmelCase = vae_state_dict["decoder.conv_in.bias"]
__lowerCAmelCase = vae_state_dict["decoder.conv_out.weight"]
__lowerCAmelCase = vae_state_dict["decoder.conv_out.bias"]
__lowerCAmelCase = vae_state_dict["decoder.norm_out.weight"]
__lowerCAmelCase = vae_state_dict["decoder.norm_out.bias"]
__lowerCAmelCase = vae_state_dict["quant_conv.weight"]
__lowerCAmelCase = vae_state_dict["quant_conv.bias"]
__lowerCAmelCase = vae_state_dict["post_quant_conv.weight"]
__lowerCAmelCase = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
__lowerCAmelCase = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
__lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(_snake_case )
}
# Retrieves the keys for the decoder up blocks only
__lowerCAmelCase = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
__lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(_snake_case )
}
for i in range(_snake_case ):
__lowerCAmelCase = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
__lowerCAmelCase = vae_state_dict.pop(
f"encoder.down.{i}.downsample.conv.weight" )
__lowerCAmelCase = vae_state_dict.pop(
f"encoder.down.{i}.downsample.conv.bias" )
__lowerCAmelCase = renew_vae_resnet_paths(_snake_case )
__lowerCAmelCase = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case )
__lowerCAmelCase = [key for key in vae_state_dict if "encoder.mid.block" in key]
__lowerCAmelCase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__lowerCAmelCase = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
__lowerCAmelCase = renew_vae_resnet_paths(_snake_case )
__lowerCAmelCase = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case )
__lowerCAmelCase = [key for key in vae_state_dict if "encoder.mid.attn" in key]
__lowerCAmelCase = renew_vae_attention_paths(_snake_case )
__lowerCAmelCase = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case )
conv_attn_to_linear(_snake_case )
for i in range(_snake_case ):
__lowerCAmelCase = num_up_blocks - 1 - i
__lowerCAmelCase = [
key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
]
if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
__lowerCAmelCase = vae_state_dict[
f"decoder.up.{block_id}.upsample.conv.weight"
]
__lowerCAmelCase = vae_state_dict[
f"decoder.up.{block_id}.upsample.conv.bias"
]
__lowerCAmelCase = renew_vae_resnet_paths(_snake_case )
__lowerCAmelCase = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case )
__lowerCAmelCase = [key for key in vae_state_dict if "decoder.mid.block" in key]
__lowerCAmelCase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__lowerCAmelCase = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
__lowerCAmelCase = renew_vae_resnet_paths(_snake_case )
__lowerCAmelCase = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case )
__lowerCAmelCase = [key for key in vae_state_dict if "decoder.mid.attn" in key]
__lowerCAmelCase = renew_vae_attention_paths(_snake_case )
__lowerCAmelCase = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case )
conv_attn_to_linear(_snake_case )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
A : Dict = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
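# Usage sketch (added illustration; the script name and paths are hypothetical):
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./checkpoints/vae.pt --dump_path ./converted_vae
#
# The converted weights can then be reloaded with the standard diffusers API:
#
#   from diffusers import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("./converted_vae")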
| 369
|
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Determine whether ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generate prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'''{solution() = }''')
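# Quick sanity checks (added illustration, not part of the original solution):
# the primes below 10 are 2, 3, 5 and 7, so solution(10) must return their sum, 17.
#
#   assert is_prime(7) and not is_prime(9)
#   assert solution(10) == 17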
| 259
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
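# Usage sketch (added illustration): instantiate the configuration with its
# defaults or override individual hyperparameters; `num_hidden_layers` resolves
# to `n_layer` through `attribute_map` above.
#
#   config = CTRLConfig()
#   print(config.n_embd)            # 1280
#   small = CTRLConfig(n_layer=12)  # hypothetical smaller variant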
| 108
|
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
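# Usage sketch through the public `pipeline` factory (the checkpoint name is a
# common fill-mask model, used here only as an example):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=3)
#   # -> list of dicts with "score", "token", "token_str" and "sequence"
#   unmasker("Paris is the [MASK] of France.", targets=["capital", "center"])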
| 153
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 107
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
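# Usage sketch (added illustration): the ONNX config wraps a regular model
# config and exposes the dynamic axes used for export.
#
#   config = DistilBertConfig()
#   onnx_config = DistilBertOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict of input_ids / attention_mask axes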
| 107
| 1
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier free sampling
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that for each column vector, the total cumulative probability is
        `truncation_rate`. The lowest probabilities that would increase the cumulative probability above
        `truncation_rate` are set to zero.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
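# Usage sketch (the checkpoint name is the community VQ-Diffusion release and
# should be treated as an assumption):
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   pipe = pipe.to("cuda")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=50).images[0]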
| 200
|
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
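# Shape sketch (added illustration, hypothetical input): for a protein of L
# residues the function adds "atom14_atom_exists" (L, 14),
# "residx_atom14_to_atom37" (L, 14), "residx_atom37_to_atom14" (L, 37) and
# "atom37_atom_exists" (L, 37) to the feature dict.
#
#   protein = {"aatype": torch.zeros(128, dtype=torch.long)}  # 128 alanines
#   protein = make_atom14_masks(protein)
#   print(protein["atom14_atom_exists"].shape)  # torch.Size([128, 14])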
| 200
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """
        A loop is present if, while traversing, we revisit a node we have
        already seen.
        """
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 149
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder that maps prefix embeddings into the GPT-2 embedding space and generates
    text with beam search.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50_257,
        n_positions: int = 1_024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
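# Usage sketch with hypothetical dimensions: the decoder projects CLIP-sized
# prefix embeddings into GPT-2 space and beam-searches a caption.
#
#   decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#   features = torch.randn(1, 77, 768)
#   tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")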
| 149
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 image embeddings.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 21
|
"""simple docstring"""
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
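# Usage sketch (added illustration, hypothetical values):
#
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)
#   linked_list.insert_at_position(2, 99)
#   print(linked_list)          # 1 99 2 3
#   linked_list.delete_value(99)
#   print(2 in linked_list)     # True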
| 290
| 0
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: linear regression."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX (seasonal ARIMA with an exogenous regressor)."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regression with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return the lower limit derived from the interquartile range of the data."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority-vote whether the forecasts are consistent with the actual value."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
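# Interpretation sketch (added illustration): `res_vote` holds one next-day
# estimate per model, and `data_safety_checker` majority-votes each estimate
# against the actual value with a 0.1 tolerance, e.g.:
#
#   data_safety_checker([0.5, 0.52, 0.49], 0.51)  # -> True (votes agree)
#   data_safety_checker([0.9, 0.95, 1.1], 0.2)    # -> False (all overshoot)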
| 358
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
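# Usage sketch (added illustration): the defaults reproduce the classical x2
# super-resolution variant; `upscale` controls the output scale factor.
#
#   config = Swin2SRConfig()
#   print(config.upscale)                 # 2
#   config_x4 = Swin2SRConfig(upscale=4)  # hypothetical x4 variant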
| 172
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients should be in order of degree, from smallest to largest.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant_of_integration: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant_of_integration
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
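# Usage sketch (added illustration): coefficients are ordered from the
# constant term upwards, so Polynomial(2, [3, 0, 1]) represents x^2 + 3.
#
#   p = Polynomial(2, [3, 0, 1])
#   print(p)               # 1x^2 + 3
#   print(p.evaluate(2))   # 7
#   print(p.derivative())  # 2x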
| 34
|
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ('j' is folded into 'i')."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # Row and column of the letter in the square, 1-indexed.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Write row/column coordinates of each letter as a 2 x n matrix.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Read the matrix row-major and re-pair the digits into letters.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Recover the flattened coordinate sequence from the ciphertext.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # Split it into the original row and column halves.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
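# A round-trip sketch of the cipher above. encode() folds 'j' into 'i' and
# strips spaces, so decode() returns the normalized plaintext.
cipher = BifidCipher()
secret = cipher.encode("testmessage")
print(secret)  # -> "qtltbdxrxlk"
assert cipher.decode(secret) == "testmessage"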
| 259
| 0
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
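# Minimal smoke test for the tokenizer above, using a toy vocabulary written to
# a temporary file. Real ESM checkpoints ship their own vocab.txt on the Hub;
# the token list and the expected ids below are illustrative assumptions.
import tempfile

toy_vocab = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]
with tempfile.TemporaryDirectory() as tmp_dir:
    vocab_path = os.path.join(tmp_dir, "vocab.txt")
    with open(vocab_path, "w") as f:
        f.write("\n".join(toy_vocab))
    tokenizer = EsmTokenizer(vocab_path)
    # _tokenize splits on whitespace, so each residue is one token, and
    # build_inputs_with_special_tokens wraps the ids in <cls> ... <eos>.
    print(tokenizer("L A G")["input_ids"])  # expected: [0, 4, 5, 6, 2]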
| 367
|
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized (untrained) model built from a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
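# Because the entry point goes through fire.Fire, positional CLI arguments map
# one-to-one onto the function parameters; programmatically it is just a call.
# The config name and output path below are illustrative.
model = save_randomly_initialized_version("t5-small", "/tmp/rand-t5-small")
print(sum(p.numel() for p in model.parameters()))  # t5-small-sized, random weights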
| 144
| 0
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
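# A quick sanity check after running the script: reload the dumped pipeline.
# The path is hypothetical; use whatever was passed as --dump_path.
pipe = UnCLIPImageVariationPipeline.from_pretrained("/path/to/dump")
print(type(pipe).__name__)  # UnCLIPImageVariationPipeline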
| 107
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively with the Jacobi method, starting from init_val."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if the coefficient part of `table` is not strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
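# Worked example: a strictly diagonally dominant 3x3 system
#   4x +  y +  z =  2
#    x + 5y + 2z = -6
#    x + 2y + 4z = -4
# Each Jacobi iteration solves row i for x_i using the previous iterate for
# the other unknowns, so the vector moves toward the exact solution.
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))
# -> [0.909375, -1.14375, -0.7484375]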
| 107
| 1
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 357
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
| 295
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = MvpTokenizer
UpperCamelCase__ = MvpTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = filter_roberta_detectors
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
super().setUp()
UpperCamelCase__: Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__: Dict = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
UpperCamelCase__: Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__: List[str] = {"unk_token": "<unk>"}
UpperCamelCase__: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: List[Any] , **__lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def UpperCAmelCase_ ( self: Dict , **__lowerCamelCase: str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: int ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase__: List[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__: Tuple = tokenizer(__lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase__: Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# Test that special tokens are reset
@require_torch
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__: int = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertNotIn("labels" , __lowerCamelCase )
self.assertNotIn("decoder_attention_mask" , __lowerCamelCase )
@require_torch
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: int = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__: Any = tokenizer(text_target=__lowerCamelCase , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__: List[str] = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Any = ["A long paragraph for summarization."]
UpperCamelCase__: Optional[Any] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__: int = tokenizer(__lowerCamelCase , text_target=__lowerCamelCase , return_tensors="pt" )
UpperCamelCase__: Dict = inputs["input_ids"]
UpperCamelCase__: List[str] = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase__: int = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase__: Tuple = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase__: Union[str, Any] = "A, <mask> AllenNLP sentence."
UpperCamelCase__: int = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
UpperCamelCase__: str = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCamelCase__: str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCamelCase__: Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
            # Rust correctly handles the space before the mask while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 149
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 149
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 154
|
"""simple docstring"""
import os
def solution():
    """
    Find the greatest product of four adjacent numbers in the same direction
    (right, down, or either diagonal) in the 20x20 grid.
    """
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right: j + 3 must stay inside the 20 columns, hence range(17)
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left): j - 3 must stay non-negative, hence range(3, 20)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum
if __name__ == "__main__":
print(solution())
| 154
| 1
|
def solution():
    """
    Return the product a * b * c for the unique Pythagorean triplet
    (a, b, c) with a + b + c == 1000, found by brute force.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'{solution() = }')
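# For reference, the unique triplet is (200, 375, 425): 200**2 + 375**2 == 425**2
# and 200 + 375 + 425 == 1000, so the expected product is 31875000.
assert solution() == 200 * 375 * 425 == 31_875_000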
| 184
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
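# A hedged usage sketch; the checkpoint is a public example and downloading it
# requires network access. Passing audio and text together stores the tokenized
# text under "labels" on the feature-extractor output, as implemented above.
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
batch = processor(audio=[0.0] * 16000, sampling_rate=16000, text="HELLO", return_tensors="pt")
print(sorted(batch.keys()))  # expect at least "input_values" and "labels"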
| 172
| 0
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the checkpoint's weights into our RobertaPreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used; remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
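# Programmatic use mirrors the CLI. The checkpoint id comes from the script's
# own help text; the dump path is hypothetical.
convert_roberta_prelayernorm_checkpoint_to_pytorch(
    "andreasmadsen/efficient_mlm_m0.40", "/tmp/roberta-prelayernorm"
)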
| 329
|
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """
    Liouville's lambda function: -1 if `number` has an odd count of prime
    factors (taken with multiplicity), else 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
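# Worked examples, counting prime factors with multiplicity:
print(liouville_lambda(12))  # 12 = 2 * 2 * 3, three factors -> -1
print(liouville_lambda(36))  # 36 = 2 * 2 * 3 * 3, four factors -> 1
print(liouville_lambda(1))   # empty factorization, zero factors -> 1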
| 329
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
lowercase = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
lowercase = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
lowercase = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
# prepare image and target
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowercase = json.loads(f.read() )
lowercase = {"image_id": 3_9769, "annotations": target}
# encode them
lowercase = DetaImageProcessor()
lowercase = image_processing(images=snake_case__ , annotations=snake_case__ , return_tensors='pt' )
# verify pixel values
lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case__ )
lowercase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case__ , atol=1E-4 ) )
# verify area
lowercase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case__ ) )
# verify boxes
lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case__ )
lowercase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case__ , atol=1E-3 ) )
# verify image_id
lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case__ ) )
# verify is_crowd
lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case__ ) )
# verify class_labels
lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case__ ) )
# verify orig_size
lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case__ ) )
# verify size
lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case__ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
# prepare image, target and masks_path
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowercase = json.loads(f.read() )
lowercase = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowercase = DetaImageProcessor(format='coco_panoptic' )
lowercase = image_processing(images=snake_case__ , annotations=snake_case__ , masks_path=snake_case__ , return_tensors='pt' )
# verify pixel values
lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case__ )
lowercase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case__ , atol=1E-4 ) )
# verify area
lowercase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case__ ) )
# verify boxes
lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case__ )
lowercase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case__ , atol=1E-3 ) )
# verify image_id
lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case__ ) )
# verify is_crowd
lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case__ ) )
# verify class_labels
lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case__ ) )
# verify masks
lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , snake_case__ )
# verify orig_size
lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case__ ) )
# verify size
lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case__ ) )
| 195
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Union[str, Any] = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[str] = "canine"
def __init__( self : Optional[Any] , snake_case__ : Union[str, Any]=768 , snake_case__ : Tuple=12 , snake_case__ : Optional[Any]=12 , snake_case__ : Union[str, Any]=3072 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : int=1_6384 , snake_case__ : str=16 , snake_case__ : Tuple=0.02 , snake_case__ : Dict=1E-12 , snake_case__ : Any=0 , snake_case__ : Optional[int]=0xe_000 , snake_case__ : List[str]=0xe_001 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : List[Any]=8 , snake_case__ : List[str]=1_6384 , snake_case__ : Union[str, Any]=128 , **snake_case__ : Tuple , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCamelCase_ : Tuple =max_position_embeddings
lowerCamelCase_ : Optional[int] =hidden_size
lowerCamelCase_ : Tuple =num_hidden_layers
lowerCamelCase_ : Dict =num_attention_heads
lowerCamelCase_ : str =intermediate_size
lowerCamelCase_ : Dict =hidden_act
lowerCamelCase_ : List[Any] =hidden_dropout_prob
lowerCamelCase_ : Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase_ : Dict =initializer_range
lowerCamelCase_ : Tuple =type_vocab_size
lowerCamelCase_ : Optional[Any] =layer_norm_eps
# Character config:
lowerCamelCase_ : List[str] =downsampling_rate
lowerCamelCase_ : List[Any] =upsampling_kernel_size
lowerCamelCase_ : Any =num_hash_functions
lowerCamelCase_ : Optional[int] =num_hash_buckets
lowerCamelCase_ : Union[str, Any] =local_transformer_stride
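# A minimal usage sketch, assuming the usual `transformers` config/model pattern:
#     from transformers import CanineConfig, CanineModel
#     configuration = CanineConfig()        # google/canine-s style defaults
#     model = CanineModel(configuration)    # randomly initialized model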
| 144
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[str] =None
lowerCamelCase : Union[str, Any] =BloomTokenizerFast
lowerCamelCase : List[str] =BloomTokenizerFast
lowerCamelCase : List[str] =True
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : Union[str, Any] ="tokenizer_file"
lowerCamelCase : Union[str, Any] ={"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
__lowerCamelCase = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : int , **a : Tuple ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
__lowerCamelCase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
__lowerCamelCase = tokenizer.batch_encode_plus(a )['''input_ids''']
self.assertListEqual(a , a )
__lowerCamelCase = tokenizer.batch_decode(a )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Optional[int]=6 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(a , **a )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowerCamelCase = '''This is a simple input'''
__lowerCamelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowerCamelCase = ('''This is a simple input''', '''This is a pair''')
__lowerCamelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(a , max_length=a )
tokenizer_r.encode_plus(a , max_length=a )
tokenizer_r.batch_encode_plus(a , max_length=a )
tokenizer_r.encode(a , max_length=a )
tokenizer_r.batch_encode_plus(a , max_length=a )
except ValueError:
self.fail('''Bloom tokenizer should be able to deal with padding''' )
__lowerCamelCase = None # Hotfixing padding = None
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='''max_length''' )
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='''max_length''' )
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='''max_length''' , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='''max_length''' )
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='''max_length''' )
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='''max_length''' , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=a )
__lowerCamelCase = next(iter(a ) )['''premise'''] # pick up one data
__lowerCamelCase = list(sample_data.values() )
__lowerCamelCase = list(map(tokenizer.encode , a ) )
__lowerCamelCase = [tokenizer.decode(a , clean_up_tokenization_spaces=a ) for x in output_tokens]
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 237
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =LongformerTokenizer
lowerCamelCase : Optional[Any] =True
lowerCamelCase : List[str] =LongformerTokenizerFast
lowerCamelCase : Union[str, Any] =True
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(a , range(len(a ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
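# "\u0120" is the byte-level BPE marker ("Ġ") that GPT-2-style tokenizers use to encode a leading space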
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a ) )
def SCREAMING_SNAKE_CASE__ ( self : int , **a : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : str , **a : Dict ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int ):
"""simple docstring"""
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCamelCase = tokenizer.tokenize(a ) # , add_prefix_space=True)
self.assertListEqual(a , a )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=a )
__lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a )
__lowerCamelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = '''Encode this sequence.'''
__lowerCamelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a , a )
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a , a )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a , a )
# Testing spaces after special tokens
__lowerCamelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(a , lstrip=a , rstrip=a )} ) # mask token has a left space
__lowerCamelCase = tokenizer.convert_tokens_to_ids(a )
__lowerCamelCase = '''Encode <mask> sequence'''
__lowerCamelCase = '''Encode <mask>sequence'''
__lowerCamelCase = tokenizer.encode(a )
__lowerCamelCase = encoded.index(a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a , a )
__lowerCamelCase = tokenizer.encode(a )
__lowerCamelCase = encoded.index(a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase = self.tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase = '''A, <mask> AllenNLP sentence.'''
__lowerCamelCase = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
__lowerCamelCase = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__lowerCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__lowerCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , a )
self.assertEqual(post_processor_state['''add_prefix_space'''] , a )
self.assertEqual(post_processor_state['''trim_offsets'''] , a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCamelCase = f"""{text_of_1_token} {text_of_1_token}"""
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
__lowerCamelCase = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , add_prefix_space=a , trim_offsets=a )
__lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
| 237
| 1
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : Callable , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[dict] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : Dict , ) ->List[Any]:
"""simple docstring"""
super().__init__(
features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , )
a = Generator(
cache_dir=__UpperCAmelCase , features=__UpperCAmelCase , generator=__UpperCAmelCase , gen_kwargs=__UpperCAmelCase , **__UpperCAmelCase , )
def __lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
if self.streaming:
a = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
a = None
a = None
a = None
a = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , )
a = self.builder.as_dataset(
split='''train''' , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 0
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class A ( unittest.TestCase ):
def _A (self ):
__lowercase= logging.get_logger()
# the current default level is logging.WARNING
__lowercase= logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowerCAmelCase )
def _A (self ):
__lowercase= logging.get_verbosity()
__lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )
__lowercase= 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning(lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning(lowerCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning(lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(lowerCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def _A (self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )
__lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase )
__lowercase= logging.log_levels[env_level_str]
__lowercase= logging.get_verbosity()
self.assertEqual(
lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
# restore to the original level
__lowercase= ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def _A (self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
__lowercase= logging.logging.getLogger()
with CaptureLogger(lowerCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def _A (self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
__lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )
__lowercase= 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning_advice(lowerCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning_advice(lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 295
| 0
|
from __future__ import annotations
from random import random
class _lowercase :
"""simple docstring"""
def __init__(self , lowerCamelCase_ = None ):
"""simple docstring"""
a = value
a = random()
a = None
a = None
def __repr__(self ):
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{F'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 )
def __str__(self ):
"""simple docstring"""
a = str(self.value ) + " "
a = str(self.left or "" )
a = str(self.right or "" )
return value + left + right
def a( A : Node | None , A : int ) -> tuple[Node | None, Node | None]:
"""simple docstring"""
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a , a = split(root.left , A )
return left, root
else:
a , a = split(root.right , A )
return root, right
def a( A : Node | None , A : Node | None ) -> Node | None:
"""simple docstring"""
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a = merge(left.right , A )
return left
else:
a = merge(A , right.left )
return right
def a( A : Node | None , A : int ) -> Node | None:
"""simple docstring"""
a = Node(A )
a , a = split(A , A )
return merge(merge(A , A ) , A )
def a( A : Node | None , A : int ) -> Node | None:
"""simple docstring"""
a , a = split(A , value - 1 )
a , a = split(A , A )
return merge(A , A )
def a( A : Node | None ) -> None:
"""simple docstring"""
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def a( A : Node | None , A : str ) -> Node | None:
"""simple docstring"""
for arg in args.split():
if arg[0] == "+":
a = insert(A , int(arg[1:] ) )
elif arg[0] == "-":
a = erase(A , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def a( ) -> None:
"""simple docstring"""
a = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
a = input()
while args != "q":
a = interact_treap(A , A )
print(A )
a = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 71
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_lowercase: List[Any] = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def a( A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(A , A )
_lowercase: Optional[int] = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def a( A : str ) -> Union[str, Any]:
"""simple docstring"""
a = list(s_dict.keys() )
for key in keys:
a = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a = new_key.replace(A , A )
print(f'''{key} -> {new_key}''' )
a = s_dict.pop(A )
return s_dict
def a( A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
a , a = emb.weight.shape
a = nn.Linear(A , A , bias=A )
a = emb.weight.data
return lin_layer
def a( A : str , A : str ) -> bytes:
"""simple docstring"""
os.makedirs(A , exist_ok=A )
a = os.path.basename(A )
a = url.split("/" )[-2]
a = os.path.join(A , A )
if os.path.exists(A ) and not os.path.isfile(A ):
raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
if os.path.isfile(A ):
a = open(A , "rb" ).read()
if hashlib.shaaaa(A ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(A ) as source, open(A , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=A , unit_divisor=1024 ) as loop:
while True:
a = source.read(8192 )
if not buffer:
break
output.write(A )
loop.update(len(A ) )
a = open(A , "rb" ).read()
if hashlib.shaaaa(A ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
def a( A : List[str] , A : Union[str, Any] ) -> str:
"""simple docstring"""
if ".pt" not in checkpoint_path:
a = _download(_MODELS[checkpoint_path] )
else:
a = torch.load(A , map_location="cpu" )
a = original_checkpoint["dims"]
a = original_checkpoint["model_state_dict"]
a = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(A )
rename_keys(A )
a = True
a = state_dict["decoder.layers.0.fc1.weight"].shape[0]
a = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=A , decoder_ffn_dim=A , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
a = WhisperForConditionalGeneration(A )
a , a = model.model.load_state_dict(A , strict=A )
if len(A ) > 0 and not set(A ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
a = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a = proj_out_weights
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase: Dict = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_lowercase: List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 71
| 1
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:str = StableUnCLIPPipeline
_UpperCamelCase:str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase:Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase:Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase:str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_UpperCamelCase:List[str] = False
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =32
lowerCamelCase_ =embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_SCREAMING_SNAKE_CASE , projection_dim=_SCREAMING_SNAKE_CASE , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase_ =PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_SCREAMING_SNAKE_CASE , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase_ =DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase_ =StableUnCLIPImageNormalizer(embedding_dim=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_SCREAMING_SNAKE_CASE , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_SCREAMING_SNAKE_CASE , layers_per_block=1 , upcast_attention=_SCREAMING_SNAKE_CASE , use_linear_projection=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
lowerCamelCase_ =DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase_ =AutoencoderKL()
lowerCamelCase_ ={
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 )-> Optional[Any]:
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
lowerCamelCase_ =torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ =torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Dict:
lowerCamelCase_ =torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self )-> int:
lowerCamelCase_ =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
lowerCamelCase_ =StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ =torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCamelCase_ =pipe("""anime turle""" , generator=_SCREAMING_SNAKE_CASE , output_type="""np""" )
lowerCamelCase_ =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ =StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
lowerCamelCase_ =pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ =pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
lowerCamelCase_ =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 154
|
from __future__ import annotations
import math
def __UpperCamelCase ( _A : int , _A : int , _A : bool , _A : list[int] , _A : float ) ->int:
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(_A ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , _A , _A , _A ) , minimax(depth + 1 , node_index * 2 + 1 , _A , _A , _A ) , )
return min(
minimax(depth + 1 , node_index * 2 , _A , _A , _A ) , minimax(depth + 1 , node_index * 2 + 1 , _A , _A , _A ) , )
def __UpperCamelCase ( ) ->None:
"""simple docstring"""
lowerCamelCase_ =[90, 23, 6, 33, 21, 65, 123, 34423]
lowerCamelCase_ =math.log(len(_A ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , _A , _A , _A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 154
| 1
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
a_ = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 163
|
"""simple docstring"""
def a__ ( __lowercase=2_8123 ) -> List[Any]:
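# Project Euler 23: every integer above 28123 can be written as the sum of two
# abundant numbers, so a sieve of proper-divisor sums up to that limit suffices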
_A = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
_A = set()
_A = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(n )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 163
| 1
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :str = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__: str , a__: str ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = RobertaPreLayerNormConfig.from_pretrained(
a__ , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
_UpperCAmelCase = torch.load(hf_hub_download(repo_id=a__ , filename='pytorch_model.bin' ) )
_UpperCAmelCase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
_UpperCAmelCase = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
_UpperCAmelCase = tensor_value
_UpperCAmelCase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=a__ , config=a__ , state_dict=a__ )
model.save_pretrained(a__ )
# convert tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained(a__ )
tokenizer.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def lowerCAmelCase__ ( a__: Any , a__: Tuple , a__: Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = hf_hub_url(repo_id=a__ , path=a__ , revision=a__ )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(a__ )}'''
| 329
| 1
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase : Tuple = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def a__ ( pkg , hint=None ) -> Optional[Any]:
require_version(deps[pkg] , hint )
| 358
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 1
@register_to_config
def __init__( self , _a=2_000 , _a=0.1 , _a=20 , _a=1e-3 ):
"""simple docstring"""
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )
def _lowerCAmelCase ( self , _a , _a , _a , _a=None ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
lowerCamelCase = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
lowerCamelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
lowerCamelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
lowerCamelCase = std.unsqueeze(-1 )
lowerCamelCase = -score / std
# compute
lowerCamelCase = -1.0 / len(self.timesteps )
lowerCamelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
lowerCamelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
lowerCamelCase = beta_t.unsqueeze(-1 )
lowerCamelCase = -0.5 * beta_t * x
lowerCamelCase = torch.sqrt(_a )
lowerCamelCase = drift - diffusion**2 * score
lowerCamelCase = x + drift * dt
# add noise
lowerCamelCase = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
lowerCamelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 168
| 0
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : Any ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase : Optional[int] ={
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
__lowerCAmelCase : List[str] ={
"Salesforce/codegen-350M-mono": 2048,
}
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = VOCAB_FILES_NAMES
__lowercase = PRETRAINED_VOCAB_FILES_MAP
__lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase = ["""input_ids""", """attention_mask"""]
__lowercase = CodeGenTokenizer
def __init__( self :int , lowercase_ :List[Any]=None , lowercase_ :Optional[int]=None , lowercase_ :Optional[Any]=None , lowercase_ :Optional[int]="<|endoftext|>" , lowercase_ :Optional[Any]="<|endoftext|>" , lowercase_ :int="<|endoftext|>" , lowercase_ :Optional[Any]=False , **lowercase_ :List[Any] , )-> Optional[Any]:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
if kwargs.pop("add_bos_token" , lowercase_ ):
A__ = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowercase_ ) != add_prefix_space:
A__ = getattr(lowercase_ , pre_tok_state.pop("type" ) )
A__ = add_prefix_space
A__ = pre_tok_class(**lowercase_ )
A__ = add_prefix_space
def UpperCAmelCase_ ( self :Union[str, Any] , *lowercase_ :List[Any] , **lowercase_ :Optional[Any] )-> BatchEncoding:
A__ = kwargs.get("is_split_into_words" , lowercase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self :List[str] , *lowercase_ :Dict , **lowercase_ :str )-> BatchEncoding:
A__ = kwargs.get("is_split_into_words" , lowercase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self :int , lowercase_ :str , lowercase_ :Optional[str] = None )-> Tuple[str]:
A__ = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , lowercase_ :bool = False , lowercase_ :bool = None , lowercase_ :Optional[List[str]] = None , **lowercase_ :List[Any] , )-> str:
A__ = super().decode(
token_ids=lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , **lowercase_ , )
if truncate_before_pattern is not None and len(lowercase_ ) > 0:
A__ = self.truncate(lowercase_ , lowercase_ )
return decoded_text
def UpperCAmelCase_ ( self :Tuple , lowercase_ :str , lowercase_ :str )-> Union[str, Any]:
def find_re(lowercase_ :Optional[Any] , lowercase_ :List[str] , lowercase_ :Optional[int] ):
A__ = pattern.search(lowercase_ , lowercase_ )
return m.start() if m else -1
A__ = [re.compile(lowercase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
A__ = list(re.finditer("^print" , lowercase_ , re.MULTILINE ) )
if len(lowercase_ ) > 1:
A__ = completion[: prints[1].start()]
A__ = list(re.finditer("^def" , lowercase_ , re.MULTILINE ) )
if len(lowercase_ ) > 1:
A__ = completion[: defs[1].start()]
A__ = 0
A__ = [
pos for pos in [find_re(lowercase_ , lowercase_ , lowercase_ ) for terminal in terminals] if pos != -1
]
if len(lowercase_ ) > 0:
return completion[: min(lowercase_ )]
else:
return completion
| 237
|
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 237
| 1
|
from math import factorial, pi
def __magic_name__ ( __a : List[Any] , __a : str = 30 ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , (int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
UpperCamelCase__ = float(_lowerCAmelCase )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(_lowerCAmelCase ) )
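# Both helpers first reduce theta modulo 2*pi, then evaluate the truncated Maclaurin series:
#     sin(x) = sum_{r>=0} (-1)^r x^(2r+1) / (2r+1)!        cos(x) = sum_{r>=0} (-1)^r x^(2r) / (2r)!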
def __magic_name__ ( __a : Optional[Any] , __a : Optional[int] = 30 ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , (int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
UpperCamelCase__ = float(_lowerCAmelCase )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 354
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
def __magic_name__ ( __a : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = R"""\w+[.]\d+"""
UpperCamelCase__ = re.findall(__a , __a )
for pat in pats:
UpperCamelCase__ = key.replace(__a , """_""".join(pat.split(""".""" ) ) )
return key
def __magic_name__ ( __a : str , __a : Dict , __a : int ):
'''simple docstring'''
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __magic_name__ ( __a : List[Any] , __a : List[Any] , __a : Optional[int]=42 ):
'''simple docstring'''
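# Rename each PyTorch tensor to Flax conventions (transposing conv/linear kernels),
# validate shapes against randomly initialized Flax params, and rebuild the nested dict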
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCamelCase__ = flax_model.init_weights(PRNGKey(__a ) )
UpperCamelCase__ = flatten_dict(__a )
UpperCamelCase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = rename_key(__a )
UpperCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(__a )
return unflatten_dict(__a )
| 178
| 0
|