| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 or 1) |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Tuple = logging.get_logger(__name__)
def a__ ( snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCamelCase = RobertaPreLayerNormConfig.from_pretrained(
snake_case_ , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
lowerCamelCase = torch.load(hf_hub_download(repo_id=snake_case_ , filename="""pytorch_model.bin""" ) )
lowerCamelCase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
lowerCamelCase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
lowerCamelCase = tensor_value
lowerCamelCase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case_ , config=snake_case_ , state_dict=snake_case_ )
model.save_pretrained(snake_case_ )
# convert tokenizer
lowerCamelCase = AutoTokenizer.from_pretrained(snake_case_ )
tokenizer.save_pretrained(snake_case_ )
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
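For orientation (not part of the original script), here is a minimal sketch of loading the converted checkpoint back, assuming the script was run with a hypothetical `--pytorch_dump_folder_path ./roberta-prelayernorm`:

```python
from transformers import AutoTokenizer, RobertaPreLayerNormForMaskedLM

# Hypothetical output directory produced by the conversion script above
dump_dir = "./roberta-prelayernorm"
model = RobertaPreLayerNormForMaskedLM.from_pretrained(dump_dir)
tokenizer = AutoTokenizer.from_pretrained(dump_dir)
```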
---
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
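For reference, a hedged sketch of using the retriever outside the test harness; it downloads the real block records for the same checkpoint that the mocked test above points at, so expect a large download:

```python
from transformers.models.realm.retrieval_realm import RealmRetriever

retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
print(retriever.block_records[0])  # first evidence block, stored as bytes
```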
---
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowercase_ = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a=False , _a=False , _a=6.0 , _a=None , _a=False , _a=False , _a=None , _a="fp4" , _a=False , **_a , ):
__a = load_in_abit
__a = load_in_abit
__a = llm_inta_threshold
__a = llm_inta_skip_modules
__a = llm_inta_enable_fpaa_cpu_offload
__a = llm_inta_has_fpaa_weight
__a = bnb_abit_quant_type
__a = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
__a = torch.floataa
elif isinstance(_a , _a ):
__a = getattr(_a , _a )
elif isinstance(_a , torch.dtype ):
__a = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def __UpperCAmelCase ( self ):
if not isinstance(self.llm_inta_threshold , _a ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _a ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _a ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , _a ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , _a ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , _a ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def __UpperCAmelCase ( self ):
return self.load_in_abit or self.load_in_abit
def __UpperCAmelCase ( self ):
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def __UpperCAmelCase ( cls , _a , _a , **_a ):
__a = cls(**_a )
__a = []
for key, value in kwargs.items():
if hasattr(_a , _a ):
setattr(_a , _a , _a )
to_remove.append(_a )
for key in to_remove:
kwargs.pop(_a , _a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def __UpperCAmelCase ( self , _a ):
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
__a = self.to_dict()
__a = json.dumps(_a , indent=2 , sort_keys=_a ) + '''\n'''
writer.write(_a )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self ):
return f'''{self.__class__.__name__} {self.to_json_string()}'''
def __UpperCAmelCase ( self , _a = True ):
if use_diff is True:
__a = self.to_diff_dict()
else:
__a = self.to_dict()
return json.dumps(_a , indent=2 , sort_keys=_a ) + "\n"
def __UpperCAmelCase ( self ):
__a = self.to_dict()
# get the default config dict
__a = BitsAndBytesConfig().to_dict()
__a = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
__a = value
return serializable_config_dict
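As a quick illustration of how this config is consumed (a sketch, not from the original file; the checkpoint name is just an example), 4-bit NF4 loading typically looks like:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bf16 while weights stay 4-bit
)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)
```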
---
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
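Outside the test suite, the same benchmark can be driven directly; a minimal sketch using the tiny model and CPU-friendly settings from the tests above:

```python
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)
```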
---
def solution() -> int:
    """
    Project Euler problem 9: find the product a * b * c of the Pythagorean
    triplet (a, b, c) with a < b < c, a**2 + b**2 == c**2 and a + b + c == 1000.
    """
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
---
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
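A hedged sketch of how a library function might call `deprecate` from inside the library to retire a keyword argument; the function, the `old_size` kwarg, and the removal version are all illustrative, not from the original file:

```python
def resize(image, size=None, **kwargs):
    # Pop the hypothetical deprecated `old_size` kwarg and emit a FutureWarning;
    # "9.9.9" is a placeholder removal version far in the future
    old_size = deprecate("old_size", "9.9.9", "Use `size` instead.", take_from=kwargs)
    if old_size is not None:
        size = old_size
    # ... actual resizing elided ...
    return image
```

Any leftover key in `kwargs` after the pop raises the `TypeError` above, so callers still get a normal "unexpected keyword argument" error for genuinely unknown arguments.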
---
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: Union[str, "sqlalchemy.sql.Selectable"] , UpperCamelCase: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , UpperCamelCase: Optional[Features] = None , UpperCamelCase: str = None , UpperCamelCase: bool = False , **UpperCamelCase: Optional[Any] , ):
"""simple docstring"""
super().__init__(features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , **UpperCamelCase )
A__ = Sql(
cache_dir=UpperCamelCase , features=UpperCamelCase , sql=UpperCamelCase , con=UpperCamelCase , **UpperCamelCase , )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = None
A__ = None
A__ = None
A__ = None
self.builder.download_and_prepare(
download_config=UpperCamelCase , download_mode=UpperCamelCase , verification_mode=UpperCamelCase , base_path=UpperCamelCase , )
# Build dataset for splits
A__ = self.builder.as_dataset(
split="""train""" , verification_mode=UpperCamelCase , in_memory=self.keep_in_memory )
return dataset
class a :
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: Dataset , UpperCamelCase: str , UpperCamelCase: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[int] = None , **UpperCamelCase: Union[str, Any] , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
A__ = dataset
A__ = name
A__ = con
A__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A__ = num_proc
A__ = to_sql_kwargs
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.to_sql_kwargs.pop("""sql""" , UpperCamelCase )
A__ = self.to_sql_kwargs.pop("""con""" , UpperCamelCase )
A__ = self.to_sql_kwargs.pop("""index""" , UpperCamelCase )
A__ = self._write(index=UpperCamelCase , **self.to_sql_kwargs )
return written
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
A__ , A__ , A__ = args
A__ = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
A__ = query_table(
table=self.dataset.data , key=slice(UpperCamelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
A__ = batch.to_pandas()
A__ = df.to_sql(self.name , self.con , index=UpperCamelCase , **UpperCamelCase )
return num_rows or len(UpperCamelCase )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Optional[Any] , **UpperCamelCase: Dict ):
"""simple docstring"""
A__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
A__ , A__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , UpperCamelCase , UpperCamelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
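To see both classes in action, here is a minimal round trip through an in-memory SQLite database (the table name and toy data are illustrative); `Dataset.to_sql` and `Dataset.from_sql` are the public entry points that wrap the writer and reader above:

```python
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["foo", "bar"]})
con = sqlite3.connect(":memory:")
ds.to_sql("my_table", con)  # handled by SqlDatasetWriter
reloaded = Dataset.from_sql("SELECT * FROM my_table", con)  # handled by SqlDatasetReader
```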
---
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , *UpperCamelCase: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Any=None ):
"""simple docstring"""
A__ = {}
if top_k is not None:
A__ = top_k
return {}, {}, postprocess_params
def __call__( self: Union[str, Any] , UpperCamelCase: Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCamelCase: Dict ):
"""simple docstring"""
return super().__call__(UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Any , UpperCamelCase: int ):
"""simple docstring"""
A__ = load_image(UpperCamelCase )
A__ = self.image_processor(images=UpperCamelCase , return_tensors=self.framework )
return model_inputs
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Any ):
"""simple docstring"""
A__ = self.model(**UpperCamelCase )
return model_outputs
def UpperCamelCase ( self: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: int=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
A__ = self.model.config.num_labels
if self.framework == "pt":
A__ = model_outputs.logits.softmax(-1 )[0]
A__ , A__ = probs.topk(UpperCamelCase )
elif self.framework == "tf":
A__ = stable_softmax(model_outputs.logits , axis=-1 )[0]
A__ = tf.math.top_k(UpperCamelCase , k=UpperCamelCase )
A__ , A__ = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
A__ = scores.tolist()
A__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase , UpperCamelCase )]
---
import inspect
import unittest

from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
---
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
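As a companion to the integration test, a hedged sketch of beam-search generation with the same public checkpoint, trimmed to a short output so it runs quickly:

```python
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
inputs = tokenizer("COVID-19 is", return_tensors="pt")
output_ids = model.generate(**inputs, max_length=40, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```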
---
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex/replacement pair from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version pinned in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the library __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
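
# Typical invocations of the script above, kept as comments so nothing runs on
# import. The utils/release.py path is the conventional location for this kind
# of helper but is an assumption here:
#
#     python utils/release.py                 # pre-release: e.g. 4.29.0.dev0 -> 4.29.0
#     python utils/release.py --patch         # patch release: e.g. 4.29.0 -> 4.29.1
#     python utils/release.py --post_release  # back to dev: e.g. 4.29.0 -> 4.30.0.dev0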
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def _snake_case ( UpperCamelCase : int = 1000000 , UpperCamelCase : int = 10 ):
UpperCAmelCase : defaultdict = defaultdict(UpperCamelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCAmelCase : str = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCAmelCase : Optional[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(UpperCamelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
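
# A brute-force cross-check of the lamina counting above, limited to tiny
# inputs so it runs instantly; the helper below exists only for illustration
# and is not part of the original solution.
def _solution_brute_force(t_limit: int = 100, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit):
        # Hole widths step by 2 so they keep the parity of the outer square;
        # the tile count grows as the hole shrinks, hence the early break.
        for hole in range(outer - 2, 0, -2):
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    assert _solution_brute_force(100) == solution(100)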
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT is a learned evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning, starting from a pretrained BERT model (Devlin et al. 2018)
and then employing a further pre-training phase on synthetic data. Finally, it is fine-tuned on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that the config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy placeholder so the type hints below don't fail when vision isn't installed."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """Return the MD5 digest of an image's raw bytes, used to fingerprint outputs."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
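
# The pipeline these tests cover, sketched in two lines and kept as comments so
# nothing downloads at import time (the Intel/dpt-large checkpoint is fetched
# on first use):
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # outputs["depth"] is a PIL image; outputs["predicted_depth"] is the raw tensor.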
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge with an integer weight."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: deque-based shortest path for graphs whose edges weigh 0 or 1."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back,
                # which keeps the deque sorted by distance without a priority queue.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
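
    # Example usage of the classes above; the edge layout here is illustrative only.
    demo = AdjacencyList(4)
    demo.add_edge(0, 1, 0)
    demo.add_edge(1, 2, 1)
    demo.add_edge(0, 3, 1)
    demo.add_edge(3, 2, 0)
    # Both 0->1->2 and 0->3->2 cost 1; 0-weight edges keep the deque sorted.
    print(demo.get_shortest_path(0, 2))  # 1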
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """Wraps all the arguments controlling bitsandbytes 8-bit / 4-bit model loading."""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments have valid types."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Returns True if the model is quantizable, False otherwise."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Returns the quantization method used, or None if the model is not quantizable."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """Instantiates a config from a Python dictionary of parameters."""
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        """Save this instance to a JSON file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        """Only serializes the attributes that differ from the default config."""
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
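
# A minimal sketch of how this config is typically consumed, kept as comments
# since this is library source. The model id is a placeholder, and 4-bit
# loading additionally requires a CUDA-enabled bitsandbytes build:
#
#     import torch
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     quantization_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_compute_dtype=torch.bfloat16,
#         bnb_4bit_use_double_quant=True,
#     )
#     model = AutoModelForCausalLM.from_pretrained(
#         "some-org/some-causal-lm",  # placeholder model id
#         quantization_config=quantization_config,
#         device_map="auto",
#     )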
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code] and suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code] and suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
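
# Typical usage of this tokenizer, kept as comments since this is library
# source (checkpoint download is assumed): tokenize English input for
# English->French translation.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello, world!", return_tensors="pt")
#     # In the default (non-legacy) mode, input_ids read [eng_Latn code] ... [</s>].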
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
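
# A quick sketch (comments only) of instantiating this config with a
# non-default cross-modality depth; the only changed argument is x_layers:
#
#     config = LxmertConfig(x_layers=3)
#     print(config.num_hidden_layers)
#     # {'vision': 5, 'cross_encoder': 3, 'language': 9}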
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
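
# The pattern under test, outside unittest and kept as comments so nothing
# writes to disk on import (the /tmp path is a placeholder):
#
#     import torch.nn as nn
#     from accelerate.utils import OffloadedWeightsLoader, offload_state_dict
#
#     model = nn.Sequential(nn.Linear(3, 4), nn.Linear(4, 5))
#     offload_state_dict("/tmp/offload_demo", model.state_dict())
#     weights = OffloadedWeightsLoader(save_folder="/tmp/offload_demo")
#     print(weights["0.weight"].shape)  # torch.Size([4, 3])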
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
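
# What these tests exercise in practice: the single-step DPM-Solver can be
# swapped into a diffusers pipeline via from_config. Kept as comments since the
# model id below is a placeholder and generation needs the matching weights:
#
#     from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
#
#     pipe = DiffusionPipeline.from_pretrained("some-org/some-diffusion-model")
#     pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
#     image = pipe("a prompt", num_inference_steps=25).images[0]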
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
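
# What the lazy structure above buys in practice: importing transformers stays
# cheap, and the heavy mt5 submodules only load on first attribute access.
# Kept as comments (checkpoint download is assumed to be possible):
#
#     from transformers import MT5ForConditionalGeneration, MT5Tokenizer
#
#     tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
#     model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")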
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
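# Example invocation (hypothetical script name and paths, for illustration only):
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub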
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
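# The Flax example scripts write their metrics to "{split}_results.json" inside the
# output directory; get_results loads that file so the tests below can assert on them.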
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(A ,"argv" ,A ):
run_flax_glue.main()
__A = get_results(A )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(A ,"argv" ,A ):
run_clm_flax.main()
__A = get_results(A )
self.assertLess(result["eval_perplexity"] ,1_00 )
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(A ,"argv" ,A ):
run_summarization_flax.main()
__A = get_results(A ,split="test" )
self.assertGreaterEqual(result["test_rouge1"] ,10 )
self.assertGreaterEqual(result["test_rouge2"] ,2 )
self.assertGreaterEqual(result["test_rougeL"] ,7 )
self.assertGreaterEqual(result["test_rougeLsum"] ,7 )
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(A ,"argv" ,A ):
run_mlm_flax.main()
__A = get_results(A )
self.assertLess(result["eval_perplexity"] ,42 )
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(A ,"argv" ,A ):
run_ta_mlm_flax.main()
__A = get_results(A )
self.assertGreaterEqual(result["eval_accuracy"] ,0.42 )
@slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(A ,"argv" ,A ):
run_flax_ner.main()
__A = get_results(A )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
self.assertGreaterEqual(result["eval_f1"] ,0.3 )
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(A ,"argv" ,A ):
run_qa.main()
__A = get_results(A )
self.assertGreaterEqual(result["eval_f1"] ,30 )
self.assertGreaterEqual(result["eval_exact"] ,30 )
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        # stochastic churn: temporarily raise the noise level from sigma to sigma_hat
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        # first order (Euler) step
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        # second order (Heun) correction, averaging the two derivatives
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
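# A minimal sketch of how this scheduler is typically driven (a simplified version of
# the KarrasVePipeline loop; `unet` and the sample shape here are assumptions):
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = unet(sample_hat, sigma_hat).sample
#       step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:  # apply the second-order correction except on the last step
#           model_output = unet(step_output.prev_sample, sigma_prev).sample
#           step_output = scheduler.step_correct(
#               model_output, sigma_hat, sigma_prev, sample_hat,
#               step_output.prev_sample, step_output.derivative,
#           )
#       sample = step_output.prev_sample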
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits; number & (number - 1) clears the lowest set bit,
    so the loop runs once per set bit instead of once per bit position."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each bit in turn."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Benchmark code for comparing the 2 functions, with different length int values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
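# Quick sanity check (illustrative values, not part of the original module):
#   25 == 0b11001  -> both functions return 3
#   37 == 0b100101 -> both functions return 3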
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it or pass -1 for a random count
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
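    # The two methods below walk the graph with the same iterative DFS; an edge back to
    # a node that is still being expanded (a "back edge") marks the nodes of a cycle.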
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it or pass -1 for a random count
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
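# MLFQ runs round robin on every queue except the last, with the time slice growing as
# processes sink to lower queues; whatever remains is finished first-come-first-served.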
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one apply the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
A : Union[str, Any] = Process('''P1''', 0, 5_3)
A : Optional[Any] = Process('''P2''', 0, 1_7)
A : Optional[int] = Process('''P3''', 0, 6_8)
A : int = Process('''P4''', 0, 2_4)
A : Any = 3
A : List[Any] = [1_7, 2_5]
A : Optional[Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
A : Optional[Any] = Process('''P1''', 0, 5_3)
A : int = Process('''P2''', 0, 1_7)
A : Optional[int] = Process('''P3''', 0, 6_8)
A : Tuple = Process('''P4''', 0, 2_4)
A : Union[str, Any] = 3
A : Optional[Any] = [1_7, 2_5]
A : Tuple = deque([Pa, Pa, Pa, Pa])
A : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0)
A : Dict = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
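# Note: the "chinese_ref" column added above is consumed by DataCollatorForWholeWordMask
# (imported at the top) so that whole words, rather than individual word pieces, are
# masked; the ref file is expected to hold one JSON list of word boundaries per line.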
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines, then tokenize the filtered text
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
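
# A minimal usage sketch for the script above. The model name, file paths, and
# output directory are placeholder values only; each flag maps onto one of the
# dataclass fields parsed by HfArgumentParser.
#
# import sys
# sys.argv = [
#     "run_mlm_wwm.py",
#     "--model_name_or_path", "bert-base-chinese",
#     "--train_file", "train.txt",
#     "--train_ref_file", "train_ref.txt",  # one JSON list of word boundaries per line
#     "--do_train",
#     "--output_dir", "./mlm-wwm-output",
# ]
# main()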
"""Project Euler Problem 5: find the smallest positive number that is evenly
divisible by all of the numbers from 1 to n (https://projecteuler.net/problem=5)."""


def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest number evenly divisible by 1..n, i.e. lcm(1, ..., n)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
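

if __name__ == "__main__":
    # Quick sanity checks: 6 = gcd(12, 18), 12 = lcm(4, 6), and 2520 is the
    # smallest number evenly divisible by every integer from 1 to 10.
    assert greatest_common_divisor(12, 18) == 6
    assert lcm(4, 6) == 12
    assert solution(10) == 2520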
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph supporting both directed and undirected modes."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex and destination_vertex, creating vertices as needed."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
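

if __name__ == "__main__":
    # A small usage sketch: build a directed graph and inspect its adjacency list.
    # add_edge returns self, so calls can be chained.
    graph = GraphAdjacencyList[str](directed=True)
    graph.add_edge("a", "b").add_edge("a", "c").add_edge("b", "c")
    print(graph)  # {'a': ['b', 'c'], 'b': ['c'], 'c': []}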
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
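
# A usage sketch; the argument values below are placeholders (the accepted
# flags come from the PretokenizationArguments dataclass in arguments.py):
#
#   python pretokenizing.py \
#       --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean \
#       --tokenized_data_repo tokenized-codeparrot-train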
"""Fine-tuning BigBird on the Natural Questions QA dataset with Flax/JAX."""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBird QA module with an extra 5-way classification head over the pooled output.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
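
# A rough wiring sketch for the pieces above. Dataset loading and the tokenizer
# are elided; num_train_steps and the wandb project name are placeholder values.
#
# args = Args()
# model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id)
# data_collator = DataCollator(pad_id=0, max_length=4096)  # pad_id should come from the tokenizer
# tx, lr = build_tx(lr=args.lr, init_lr=args.init_lr, warmup_steps=args.warmup_steps,
#                   num_train_steps=10_000, weight_decay=args.weight_decay)
# trainer = Trainer(args=args, data_collator=data_collator, train_step_fn=train_step,
#                   val_step_fn=val_step, model_save_fn=model.save_pretrained,
#                   logger=wandb.init(project="bigbird-natural-questions"), scheduler_fn=lr)
# state = trainer.create_state(model, tx, num_train_steps=10_000)
# trainer.train(state, tr_dataset, val_dataset)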
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
class DisjointSet:
    """Disjoint-set (union-find) that tracks the size of the largest set."""

    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets, returning False if they were already connected
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the parent of a given set, compressing the path as we go
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
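

if __name__ == "__main__":
    # A small usage sketch: three singleton sets, merged pairwise.
    ds = DisjointSet([1, 1, 1])
    assert ds.merge(0, 1) is True   # sets {0} and {1} join; the new set has size 2
    assert ds.merge(0, 1) is False  # already in the same set
    ds.merge(1, 2)
    assert ds.max_set == 3          # all three elements now share one set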
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration for the download and extraction process."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
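

if __name__ == "__main__":
    # A quick sketch of the copy() semantics: the clone is deep, so mutating a
    # mutable field on the original does not leak into the copy.
    original = DownloadConfig(proxies={"https": "127.0.0.1:8080"})
    clone = original.copy()
    original.proxies["https"] = "changed"
    print(clone.proxies)  # {'https': '127.0.0.1:8080'}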
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training samples."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
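
# The query point above has petal measurements typical of Iris setosa, so the
# printed class for most train/test splits should be "setosa"; the exact result
# can vary because train_test_split shuffles randomly.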
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
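
# A usage sketch; the three paths below are placeholders for a real TF
# checkpoint, its config, and the desired output file:
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin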
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo with the Stable Diffusion x4 upscaler
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def CeilIndex(v, l, r, key):  # noqa: E741
    """Binary search for the smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
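

if __name__ == "__main__":
    # A quick check on a classic example: the longest strictly increasing
    # subsequence of [2, 5, 3, 7, 11, 8, 10, 13, 6] is 2, 3, 7, 8, 10, 13.
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6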
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
'''simple docstring'''
__lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
__lowercase : List[str] = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
__a : Any = start
# add current to visited
visited.append(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(_SCREAMING_SNAKE_CASE )
# if not all vertices have been visited, select an unvisited one and continue
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
__lowercase : Union[str, Any] = topological_sort('a', [], [])
print(sort)
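For reference, a self-contained, deobfuscated sketch of the same depth-first topological sort. Note the returned list is child-first, so reversing it yields the conventional parents-before-children order; disconnected vertices would additionally need the re-seeding pass the original performs.
def topo_sort(start, edges, visited=None, order=None):
    # DFS: a vertex is appended only after all of its descendants
    visited = [] if visited is None else visited
    order = [] if order is None else order
    visited.append(start)
    for neighbor in edges[start]:
        if neighbor not in visited:
            topo_sort(neighbor, edges, visited, order)
    order.append(start)
    return order

demo_edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
print(topo_sort('a', demo_edges))  # ['c', 'd', 'e', 'b', 'a']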
| 294
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__:Optional[int] = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[Any] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
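The pattern above defers the heavy torch-dependent imports until a symbol is first accessed. A schematic of the idea for a hypothetical package mypkg (this is a sketch, not the actual transformers._LazyModule implementation):
import importlib
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [symbols]} so each symbol knows its home submodule
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(attr)
        # the submodule is imported only when one of its symbols is first touched
        module = importlib.import_module(f'.{self._symbol_to_module[attr]}', self.__name__)
        return getattr(module, attr)

# in mypkg/__init__.py the module would then replace itself:
# import sys; sys.modules[__name__] = LazyModule(__name__, {'modeling': ['MyModel']})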
| 261
|
"""simple docstring"""
import operator
def _lowerCamelCase( a , a = False , a = None ):
__a = operator.lt if reverse else operator.gt
__a = solution or []
if not arr:
return solution
__a = [arr.pop(0 )]
for i, item in enumerate(a ):
if _operator(a , sublist[-1] ):
sublist.append(a )
arr.pop(a )
# merging sublist into solution list
if not solution:
solution.extend(a )
else:
while sublist:
__a = sublist.pop(0 )
for i, xx in enumerate(a ):
if not _operator(a , a ):
solution.insert(a , a )
break
else:
solution.append(a )
strand_sort(a , a , a )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
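A self-contained reference version of the same algorithm, with the merge written out explicitly: each pass peels an ordered "strand" off the front of the input and merges it into the running solution. Written for clarity rather than efficiency.
import operator

def strand_sort_ref(arr, reverse=False):
    comp = operator.lt if reverse else operator.gt
    arr = list(arr)
    solution = []
    while arr:
        # peel one ordered strand off the front of arr
        sublist = [arr.pop(0)]
        for item in arr[:]:
            if comp(item, sublist[-1]):
                sublist.append(item)
                arr.remove(item)
        # merge the strand into the (already ordered) solution
        merged, i, j = [], 0, 0
        while i < len(solution) and j < len(sublist):
            if comp(sublist[j], solution[i]):
                merged.append(solution[i]); i += 1
            else:
                merged.append(sublist[j]); j += 1
        solution = merged + solution[i:] + sublist[j:]
    return solution

assert strand_sort_ref([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort_ref([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]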
| 261
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( a_: int = 10, a_: int = 1_000, a_: bool = True ):
assert (
isinstance(a_, a_ )
and isinstance(a_, a_ )
and isinstance(a_, a_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def __UpperCAmelCase ( a_: int, a_: int ):
return int((number_a + number_a) / 2 )
def __UpperCAmelCase ( a_: int, a_: int, a_: int ):
assert (
isinstance(a_, a_ ) and isinstance(a_, a_ ) and isinstance(a_, a_ )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(a_: int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
_UpperCAmelCase : Tuple = lower
_UpperCAmelCase : Dict = higher
_UpperCAmelCase : Tuple = []
while True:
_UpperCAmelCase : Dict = get_avg(a_, a_ )
last_numbers.append(a_ )
if answer(a_ ) == "low":
_UpperCAmelCase : Union[str, Any] = number
elif answer(a_ ) == "high":
_UpperCAmelCase : Optional[int] = number
else:
break
print(f"""guess the number : {last_numbers[-1]}""" )
print(f"""details : {last_numbers!s}""" )
def __UpperCAmelCase ( ):
_UpperCAmelCase : Union[str, Any] = int(input("Enter lower value : " ).strip() )
_UpperCAmelCase : Dict = int(input("Enter high value : " ).strip() )
_UpperCAmelCase : Any = int(input("Enter value to guess : " ).strip() )
guess_the_number(a_, a_, a_ )
if __name__ == "__main__":
main()
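Each guess above halves the (lower, higher) interval, so the loop needs about log2(higher - lower) guesses. A self-contained sketch that just counts the halvings (illustrative, not part of the script):
def count_guesses(lower, higher, to_guess):
    guesses = 0
    while True:
        guesses += 1
        mid = (lower + higher) // 2  # same midpoint as get_avg above
        if mid == to_guess:
            return guesses
        if mid < to_guess:
            lower = mid
        else:
            higher = mid

print(count_guesses(0, 1_000, 317))  # 9, roughly log2(1000)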
| 17
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
__a = logging.getLogger(__name__)
@dataclass
class A__ :
"""simple docstring"""
UpperCamelCase_ : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase_ : bool = field(
default=UpperCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase_ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase_ : bool = field(
default=UpperCamelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class A__ :
"""simple docstring"""
UpperCamelCase_ : Optional[str] = field(default=UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase_ : bool = field(
default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase_ : Optional[int] = field(
default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool = field(
default=UpperCamelCase , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase : List[Any] = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_UpperCAmelCase : List[str] = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A__ :
"""simple docstring"""
UpperCamelCase_ : PreTrainedTokenizerBase
UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Optional[int] = None
def __call__( self : List[Any] , lowerCAmelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : int = "label" if "label" in features[0].keys() else "labels"
_UpperCAmelCase : Dict = [feature.pop(lowerCAmelCase__ ) for feature in features]
_UpperCAmelCase : str = len(lowerCAmelCase__ )
_UpperCAmelCase : int = len(features[0]["input_ids"] )
_UpperCAmelCase : str = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features
]
_UpperCAmelCase : List[str] = list(chain(*lowerCAmelCase__ ) )
_UpperCAmelCase : Any = self.tokenizer.pad(
lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
_UpperCAmelCase : Any = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
_UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase__ , dtype=torch.intaa )
return batch
def __UpperCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", a_, a_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_UpperCAmelCase : Union[str, Any] = {}
if data_args.train_file is not None:
_UpperCAmelCase : str = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase : Optional[Any] = data_args.validation_file
_UpperCAmelCase : Dict = data_args.train_file.split("." )[-1]
_UpperCAmelCase : Optional[int] = load_dataset(
a_, data_files=a_, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
_UpperCAmelCase : Dict = load_dataset(
"swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_UpperCAmelCase : Optional[Any] = [f"""ending{i}""" for i in range(4 )]
_UpperCAmelCase : List[Any] = "sent1"
_UpperCAmelCase : Optional[int] = "sent2"
if data_args.max_seq_length is None:
_UpperCAmelCase : List[str] = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` longer than the default `max_seq_length` value"
" of 1024. If you would like to use a longer sequence length up to `tokenizer.model_max_length` you can"
" override this default with `--max_seq_length xxx`." )
_UpperCAmelCase : Dict = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
_UpperCAmelCase : Dict = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(a_: Union[str, Any] ):
_UpperCAmelCase : Optional[int] = [[context] * 4 for context in examples[context_name]]
_UpperCAmelCase : Tuple = examples[question_header_name]
_UpperCAmelCase : Optional[Any] = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a_ )
]
# Flatten out
_UpperCAmelCase : List[str] = list(chain(*a_ ) )
_UpperCAmelCase : Dict = list(chain(*a_ ) )
# Tokenize
_UpperCAmelCase : List[Any] = tokenizer(
a_, a_, truncation=a_, max_length=a_, padding="max_length" if data_args.pad_to_max_length else False, )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(a_ ), 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_UpperCAmelCase : int = raw_datasets["train"]
if data_args.max_train_samples is not None:
_UpperCAmelCase : Optional[Any] = min(len(a_ ), data_args.max_train_samples )
_UpperCAmelCase : List[Any] = train_dataset.select(range(a_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_UpperCAmelCase : Dict = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : int = min(len(a_ ), data_args.max_eval_samples )
_UpperCAmelCase : List[str] = eval_dataset.select(range(a_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_UpperCAmelCase : Optional[int] = eval_dataset.map(
a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
_UpperCAmelCase : Tuple = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=a_, pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(a_: Tuple ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = eval_predictions
_UpperCAmelCase : Union[str, Any] = np.argmax(a_, axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_UpperCAmelCase : Any = Trainer(
model=a_, args=a_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=a_, data_collator=a_, compute_metrics=a_, )
# Training
if training_args.do_train:
_UpperCAmelCase : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : List[str] = last_checkpoint
_UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=a_ )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase : str = train_result.metrics
_UpperCAmelCase : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
_UpperCAmelCase : Union[str, Any] = min(a_, len(a_ ) )
trainer.log_metrics("train", a_ )
trainer.save_metrics("train", a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase : List[Any] = trainer.evaluate()
_UpperCAmelCase : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
_UpperCAmelCase : Tuple = min(a_, len(a_ ) )
trainer.log_metrics("eval", a_ )
trainer.save_metrics("eval", a_ )
_UpperCAmelCase : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __UpperCAmelCase ( a_: int ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
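The multiple-choice preprocessing above hinges on a flatten/un-flatten trick: each example is repeated once per ending, tokenized as one flat batch, then regrouped into chunks of four so the model sees (batch, num_choices, seq_len). A toy illustration with no tokenizer (all names here are illustrative):
examples = {'sent1': ['ctx0', 'ctx1'], 'ending0': ['a', 'e'], 'ending1': ['b', 'f'],
            'ending2': ['c', 'g'], 'ending3': ['d', 'h']}
first_sentences = [[ctx] * 4 for ctx in examples['sent1']]
second_sentences = [[examples[f'ending{j}'][i] for j in range(4)] for i in range(2)]
flat_firsts = sum(first_sentences, [])
flat_seconds = sum(second_sentences, [])
fake_tokens = [f'{a} {b}' for a, b in zip(flat_firsts, flat_seconds)]  # stand-in for tokenizer output
regrouped = [fake_tokens[i : i + 4] for i in range(0, len(fake_tokens), 4)]
print(regrouped)  # [['ctx0 a', ..., 'ctx0 d'], ['ctx1 e', ..., 'ctx1 h']]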
| 17
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
snake_case_ : str = 0
snake_case_ : Union[str, Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free paths whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
snake_case_ : str = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
snake_case_ : List[Any] = tuple[int, int]
class __snake_case :
def __init__( self : Any , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : Node | None , ):
"""simple docstring"""
UpperCAmelCase_ = pos_x
UpperCAmelCase_ = pos_y
UpperCAmelCase_ = (pos_y, pos_x)
UpperCAmelCase_ = goal_x
UpperCAmelCase_ = goal_y
UpperCAmelCase_ = g_cost
UpperCAmelCase_ = parent
UpperCAmelCase_ = self.calculate_heuristic()
UpperCAmelCase_ = self.g_cost + self.h_cost
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.pos_x - self.goal_x
UpperCAmelCase_ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_snake_case) + abs(_snake_case)
else:
return sqrt(dy**2 + dx**2)
def __lt__( self : Union[str, Any] , _snake_case : Node):
"""simple docstring"""
return self.f_cost < other.f_cost
class __snake_case :
def __init__( self : str , _snake_case : TPosition , _snake_case : TPosition):
"""simple docstring"""
UpperCAmelCase_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _snake_case)
UpperCAmelCase_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , _snake_case)
UpperCAmelCase_ = [self.start]
UpperCAmelCase_ = []
UpperCAmelCase_ = False
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
return self.retrace_path(_snake_case)
self.closed_nodes.append(_snake_case)
UpperCAmelCase_ = self.get_successors(_snake_case)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_snake_case)
else:
# retrieve the best current path
UpperCAmelCase_ = self.open_nodes.pop(self.open_nodes.index(_snake_case))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_snake_case)
else:
self.open_nodes.append(_snake_case)
return [self.start.pos]
def lowerCamelCase ( self : Tuple , _snake_case : Node):
"""simple docstring"""
UpperCAmelCase_ = []
for action in delta:
UpperCAmelCase_ = parent.pos_x + action[1]
UpperCAmelCase_ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(_snake_case) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_snake_case , _snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _snake_case , ))
return successors
def lowerCamelCase ( self : Any , _snake_case : Node | None):
"""simple docstring"""
UpperCAmelCase_ = node
UpperCAmelCase_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
UpperCAmelCase_ = current_node.parent
path.reverse()
return path
class __snake_case :
def __init__( self : Any , _snake_case : TPosition , _snake_case : TPosition):
"""simple docstring"""
UpperCAmelCase_ = AStar(_snake_case , _snake_case)
UpperCAmelCase_ = AStar(_snake_case , _snake_case)
UpperCAmelCase_ = False
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ = self.fwd_astar.open_nodes.pop(0)
UpperCAmelCase_ = self.bwd_astar.open_nodes.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_snake_case , _snake_case)
self.fwd_astar.closed_nodes.append(_snake_case)
self.bwd_astar.closed_nodes.append(_snake_case)
UpperCAmelCase_ = current_bwd_node
UpperCAmelCase_ = current_fwd_node
UpperCAmelCase_ = {
self.fwd_astar: self.fwd_astar.get_successors(_snake_case),
self.bwd_astar: self.bwd_astar.get_successors(_snake_case),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_snake_case)
else:
# retrieve the best current path
UpperCAmelCase_ = astar.open_nodes.pop(
astar.open_nodes.index(_snake_case))
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_snake_case)
else:
astar.open_nodes.append(_snake_case)
return [self.fwd_astar.start.pos]
def lowerCamelCase ( self : int , _snake_case : Node , _snake_case : Node):
"""simple docstring"""
UpperCAmelCase_ = self.fwd_astar.retrace_path(_snake_case)
UpperCAmelCase_ = self.bwd_astar.retrace_path(_snake_case)
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
snake_case_ : Any = (0, 0)
snake_case_ : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
snake_case_ : str = time.time()
snake_case_ : List[str] = AStar(init, goal)
snake_case_ : Optional[int] = a_star.search()
snake_case_ : Optional[Any] = time.time() - start_time
print(f"AStar execution time = {end_time:f} seconds")
snake_case_ : int = time.time()
snake_case_ : Dict = BidirectionalAStar(init, goal)
snake_case_ : Optional[int] = bidir_astar.search()  # run the search so the timing below measures real work
snake_case_ : str = time.time() - bd_start_time
print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 51
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre-compute text embeddings and remove the T5 encoder to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
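The helper above resets CUDA's peak-memory counters so each pipeline stage starts from a clean slate; the tests then read torch.cuda.max_memory_allocated() and compare it against a per-stage budget. A sketch of that measure-and-assert pattern (the wrapper name is hypothetical):
import torch

def run_with_memory_budget(fn, budget_bytes):
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    result = fn()
    peak = torch.cuda.max_memory_allocated()
    assert peak < budget_bytes, f'peak {peak} exceeded budget {budget_bytes}'
    return result

# e.g. run_with_memory_budget(lambda: pipe_1(...), 13 * 10**9) for stage I above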
| 25
| 0
|
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = StableDiffusionSAGPipeline
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : int = False
def __lowerCAmelCase ( self ) ->Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) ->Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
SCREAMING_SNAKE_CASE : Tuple = sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = '''.'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = sag_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE : int = output.images
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
SCREAMING_SNAKE_CASE : int = sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = '''.'''
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = sag_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : int = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
SCREAMING_SNAKE_CASE : Optional[int] = sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = '''.'''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = sag_pipe(
[prompt] , width=768 , height=512 , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
assert image.shape == (1, 512, 768, 3)
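For context, a minimal usage sketch of self-attention-guided sampling mirroring the slow tests above; the model id and scales are the ones the tests use, and the prompt is arbitrary:
import torch
from diffusers import StableDiffusionSAGPipeline

sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
sag_pipe = sag_pipe.to('cuda')
image = sag_pipe(
    'a photo of an astronaut',       # arbitrary prompt
    generator=torch.manual_seed(0),
    guidance_scale=7.5,              # classifier-free guidance strength
    sag_scale=1.0,                   # self-attention guidance strength; 0.0 disables SAG
    num_inference_steps=20,
).images[0]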
| 19
| 1
|
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __lowerCamelCase ( a_ : Optional[Any] ) -> Union[str, Any]:
if isinstance(a_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class _SCREAMING_SNAKE_CASE:
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = np.abs((a - b) ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = VisionTextDualEncoderConfig.from_vision_text_configs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = FlaxVisionTextDualEncoderModel(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = model(input_ids=SCREAMING_SNAKE_CASE__ ,pixel_values=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Any = self.get_vision_text_model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = {'''vision_model''': vision_model, '''text_model''': text_model}
__SCREAMING_SNAKE_CASE :List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = model(input_ids=SCREAMING_SNAKE_CASE__ ,pixel_values=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[Any] = self.get_vision_text_model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = {'''vision_model''': vision_model, '''text_model''': text_model}
__SCREAMING_SNAKE_CASE :Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = model(input_ids=SCREAMING_SNAKE_CASE__ ,pixel_values=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = model(input_ids=SCREAMING_SNAKE_CASE__ ,pixel_values=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = after_output[0]
__SCREAMING_SNAKE_CASE :Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE__ ,1E-3 )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = self.get_vision_text_model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = {'''vision_model''': vision_model, '''text_model''': text_model}
__SCREAMING_SNAKE_CASE :List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = model(
input_ids=SCREAMING_SNAKE_CASE__ ,pixel_values=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,output_attentions=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = output.vision_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE :int = to_atuple(vision_model.config.image_size )
__SCREAMING_SNAKE_CASE :Any = to_atuple(vision_model.config.patch_size )
__SCREAMING_SNAKE_CASE :Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__SCREAMING_SNAKE_CASE :Tuple = num_patches + 1
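# e.g. a 224x224 image with 16x16 patches gives 14 * 14 = 196 patches, so seq_len = 197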
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
__SCREAMING_SNAKE_CASE :Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
pt_model.to(SCREAMING_SNAKE_CASE__ )
pt_model.eval()
# prepare inputs
__SCREAMING_SNAKE_CASE :Tuple = inputs_dict
__SCREAMING_SNAKE_CASE :Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__SCREAMING_SNAKE_CASE :int = pt_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
__SCREAMING_SNAKE_CASE :int = fx_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,len(SCREAMING_SNAKE_CASE__ ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(SCREAMING_SNAKE_CASE__ ,pt_output.numpy() ,4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = fx_model_loaded(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,len(SCREAMING_SNAKE_CASE__ ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(SCREAMING_SNAKE_CASE__ ,pt_output.numpy() ,4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = VisionTextDualEncoderModel.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_flax=SCREAMING_SNAKE_CASE__ )
pt_model_loaded.to(SCREAMING_SNAKE_CASE__ )
pt_model_loaded.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE :int = pt_model_loaded(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,len(SCREAMING_SNAKE_CASE__ ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ):
self.assert_almost_equals(SCREAMING_SNAKE_CASE__ ,pt_output_loaded.numpy() ,4E-2 )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = VisionTextDualEncoderConfig.from_vision_text_configs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = VisionTextDualEncoderModel(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = FlaxVisionTextDualEncoderModel(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = fx_state
self.check_pt_flax_equivalence(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = VisionTextDualEncoderModel(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = FlaxVisionTextDualEncoderModel(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ ,fx_model.params )
self.check_pt_flax_equivalence(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = self.prepare_config_and_inputs()
self.check_save_load(**SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**SCREAMING_SNAKE_CASE__ )
@is_pt_flax_cross_test
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE :Optional[Any] = config_inputs_dict.pop('''vision_config''' )
__SCREAMING_SNAKE_CASE :List[Any] = config_inputs_dict.pop('''text_config''' )
__SCREAMING_SNAKE_CASE :Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.check_equivalence_flax_to_pt(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[str] = self.get_pretrained_model_and_inputs()
__SCREAMING_SNAKE_CASE :int = model_a(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = model_a(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = after_outputs[0]
__SCREAMING_SNAKE_CASE :List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE__ ,1E-5 )
@require_flax
class FlaxViTBertModelTest(A, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(A, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
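

# A note on the tolerances above (illustrative, with an assumed helper name):
# PT<->Flax checks compare float32 outputs after a weight-format conversion, so
# the loose atol absorbs framework-level differences in reduction order rather
# than any real weight mismatch. A minimal sketch of such a comparison:
def _assert_pt_flax_close(pt_output, fx_output, tol=1e-5):
    pt = pt_output.detach().cpu().numpy()
    fx = np.asarray(fx_output)
    assert np.amax(np.abs(pt - fx)) <= tol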
"""simple docstring"""
import qiskit
def single_qubit_measure(num_qubits: int, num_classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(num_qubits, num_classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
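

# Quick sanity check (hypothetical helper; assumes qiskit with the Aer provider
# is installed): the two X gates flip both qubits before measurement, so on a
# noise-free simulator all 1000 shots should collapse to the '11' basis state.
def _check_all_shots_are_11() -> None:
    counts = single_qubit_measure(2, 2)
    assert counts.get("11", 0) == 1000, f"unexpected histogram: {counts}"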
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", class_="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
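

# Illustrative preview helper (hypothetical; assumes network access and that
# IMDb's chart markup still matches the selectors above). The scraper returns a
# title -> rating mapping, which write_movies() flattens into two-column rows.
def preview_top_movies(k: int = 3) -> None:
    for title, rating in list(get_imdb_top_250_movies().items())[:k]:
        print(f"{title}: {rating}")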
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True in inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
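

# Shape cheat-sheet for the assertions above (illustrative; the concrete numbers
# assume the released checkpoint's 100 queries and 133 COCO labels): for the
# padded 800x1088 input, masks_queries_logits comes out as (1, 100, 200, 272)
# because the pixel decoder only upsamples to a quarter of the input resolution,
# and class_queries_logits is (1, 100, 134), the extra column being the
# "no object" (null) class.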
"""simple docstring"""
def pancake_sort(arr):
    """Pancake sort: flip the current maximum to the front, then flip it into
    its final position; the unsorted window shrinks by one each pass."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
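

# Worked example (safe to run; pancake_sort is pure):
# cur=3: max 3 is at mi=0, the prefix flip is a no-op, the full flip yields [2, 1, 3]
# cur=2: max 2 is at mi=0, flipping the first two elements yields [1, 2, 3]
assert pancake_sort([3, 1, 2]) == [1, 2, 3]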
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
    @is_pt_tf_cross_test
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
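

# Reading the shape arithmetic in the tests above (illustrative note): SAM's
# processor resizes every image so its longest side is 1024, so an original
# 1764x2646 image reaches the model at 683x1024; post_process_masks then
# upsamples the low-resolution mask logits back through that resize, e.g.
#
#     processor.post_process_masks([torch.ones((1, 3, 5, 5))],
#                                  [[1764, 2646]], [[683, 1024]])[0].shape
#     # -> torch.Size([1, 3, 1764, 2646])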
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks

        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
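

# Decoding the magic numbers in test_multibytes_char (illustrative note):
# PerceiverTokenizer works on raw UTF-8 bytes shifted by its 6 special tokens
# ([PAD]=0 ... [CLS]=4, [SEP]=5), so 'U' (0x55 = 85) encodes to 85 + 6 = 91 and
# the three bytes of '€' (0xE2 0x82 0xAC) become 232, 136, 178:
#
#     ord("U") + 6 == 91
#     [b + 6 for b in "€".encode("utf-8")] == [232, 136, 178]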
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
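

# Worked example: 220 and 284 are the classic amicable pair, so both are counted
# by solution(), while a perfect number like 6 fails the != guard.
#     sum_of_divisors(220) == 284   # 1+2+4+5+10+11+20+22+44+55+110
#     sum_of_divisors(284) == 220   # 1+2+4+71+142
#     sum_of_divisors(6) == 6       # excluded: an amicable pair must differ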
"""simple docstring"""
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower < higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
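

# Deterministic check of the bisection above (hypothetical helper that mirrors
# the loop in guess_the_number without the prints): searching for 42 in (0, 100)
# visits 50 -> 25 -> 37 -> 43 -> 40 -> 41 -> 42.
def _bisection_path(lower: int, higher: int, to_guess: int) -> list:
    path = []
    while True:
        number = get_avg(lower, higher)
        path.append(number)
        if number > to_guess:
            higher = number
        elif number < to_guess:
            lower = number
        else:
            return path


# _bisection_path(0, 100, 42) == [50, 25, 37, 43, 40, 41, 42]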
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Runs the same prompt through the four CompVis v1.x checkpoints for comparison."""

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler,
            safety_checker=safety_checker, feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
__lowercase = "cuda" if torch.cuda.is_available() else "cpu"
self.to(UpperCAmelCase__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.2
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.3
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.4
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
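# Illustrative usage (not part of the original file; the `custom_pipeline` name
# and checkpoint id are assumptions based on the methods above):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#     image_v1_1, image_v1_2, image_v1_3, image_v1_4 = output.images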
| 17
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
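# Note (added for clarity, not in the original file): at runtime the entry in
# `sys.modules` is swapped for a `_LazyModule`, so each submodule is only
# imported the first time one of its attributes is accessed, e.g.:
#
#     from transformers.models.blip import BlipProcessor  # triggers the real import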
| 370
|
'''simple docstring'''
def solution(n: int = 10) -> str:
    # Returns the last `n` digits of 28433 * 2**7830457 + 1
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    # Three-argument pow performs efficient modular exponentiation
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 227
| 0
|
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    # Expected number of distinct colours among `num_picked` balls drawn from an
    # urn with NUM_COLOURS colours and BALLS_PER_COLOUR balls of each colour.
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
    print(solution(20))
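# Derivation note (added, not in the original): for any one colour, the chance
# it is absent from a draw of n balls is comb(NUM_BALLS - BALLS_PER_COLOUR, n)
# / comb(NUM_BALLS, n); by linearity of expectation the expected number of
# distinct colours is NUM_COLOURS * (1 - P(missing)). A quick Monte Carlo check:
#
#     import random
#     balls = [c for c in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
#     est = sum(len(set(random.sample(balls, 20))) for _ in range(10_000)) / 10_000
#     print(f"simulated: {est:.3f}  exact: {solution(20)}")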
| 19
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str, base_model: bool) -> None:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
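# Example invocation (illustrative; the script filename and paths are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt-best \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin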
| 80
|
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return the prime numbers up to and including num.
    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 80
| 1
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.", FutureWarning, )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 264
|
"""simple docstring"""
from __future__ import annotations
def shear_stress( stress: float, tangential_force: float, area: float, ) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
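# Worked examples (added for clarity; values are illustrative). Exactly one of
# the three arguments must be zero, and the function solves for that quantity:
#
#     shear_stress(stress=25, tangential_force=100, area=0)    # ('area', 4.0)
#     shear_stress(stress=0, tangential_force=1600, area=200)  # ('stress', 8.0)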
| 264
| 1
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"
if is_tf_available():
    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)
            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
        self.assertIsInstance(model, TFFunnelModel)
        config = copy.deepcopy(model.config)
        config.architectures = ['FunnelBaseModel']
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register('new-model', NewModelConfig)
            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            model = TFAutoModel.from_pretrained('bert-base')
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')
    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin', ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')
    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
            model = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure the model is cached first
        _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
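# The registration API exercised in test_new_model_registration above can also
# be used directly (sketch, reusing the NewModelConfig/TFNewModel pair defined
# at the top of this file):
#
#     AutoConfig.register("new-model", NewModelConfig)
#     TFAutoModel.register(NewModelConfig, TFNewModel)
#     model = TFAutoModel.from_config(NewModelConfig())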
| 119
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale( self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        return encoded_outputs
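# Illustrative usage (not part of the original file):
#
#     from PIL import Image
#     processor = BlipImageProcessor()
#     inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#     print(inputs["pixel_values"].shape)  # (1, 3, 384, 384)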
| 119
| 1
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'; torch/{_torch_version}'
    if is_flax_available():
        ua += f'; jax/{_jax_version}'
        ua += f'; flax/{_flax_version}'
    if is_onnx_available():
        ua += f'; onnxruntime/{_onnxruntime_version}'
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`.")
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ), adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None, ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision, )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.")
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
__lowerCAmelCase : Union[str, Any] ="""\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
def _get_model_file( pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.')
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"""this model name. Check the model page at """
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
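# Example of the variant naming handled by `_add_variant` above (illustrative):
#
#     _add_variant("diffusion_pytorch_model.bin", "fp16")
#     # -> "diffusion_pytorch_model.fp16.bin"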
| 197
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()
    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)
    def end(self):
        # Flush the cache, if anything is left in it
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)
    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        # Checks whether `cp` is the codepoint of a CJK character.
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)
    def __iter__(self):
        return self
    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
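# Typical usage of TextIteratorStreamer (sketch; `model`, `tokenizer` and
# `inputs` are placeholders for a loaded generation model and tokenized prompt):
#
#     from threading import Thread
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer})
#     thread.start()
#     for new_text in streamer:
#         print(new_text, end="")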
| 197
| 1
|
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str) -> tuple[str, str, str]:
        # Returns the common substring of the node prefix and the word, plus the remainders of each
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print('-' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''')
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find('''bandanas''')
    assert not root.find('''apps''')
    root.delete('''all''')
    assert not root.find('''all''')
    root.delete('''banana''')
    assert not root.find('''banana''')
    assert root.find('''bananas''')
    return True
def pytest_tests() -> None:
    assert test_trie()
def main() -> None:
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words)
    print('''Words:''', words)
    print('''Tree:''')
    root.print_tree()
if __name__ == "__main__":
    main()
| 351
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = 'gpt_neox'
    def __init__( self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''')
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 167
| 0
|
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
A_ = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
A_ = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
A_ = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
A_ = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
A_ = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
A_ = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
A_ = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
A_ = ""
A_ = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
A_ = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct( readme_md , expected_dict ):
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_fails( readme_md , expected_error ):
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_fails( readme_md , expected_error ):
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors( readme_md ):
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct( readme_md , expected_dict ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error( readme_md , expected_error ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors( readme_md , expected_error ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors( readme_md ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
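# Usage sketch (relies only on the constants defined above): a well-formed card
# parses and validates cleanly, e.g.
#   readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#   readme.validate()
# while each malformed variant raises ValueError with the matching message.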
| 139
|
ROMAN = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int( roman : str ) -> int:
    """simple docstring"""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman( number : int ) -> str:
    """simple docstring"""
    result = []
    for arabic, roman in ROMAN:
        factor , number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
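# Round-trip sketch for the converters above (sample values are illustrative):
if __name__ == "__main__":
    for sample in (3, 49, 1994):
        assert roman_to_int(int_to_roman(sample)) == sample  # e.g. 1994 <-> "MCMXCIV"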
| 227
| 0
|
def z_function( input_str : str ) -> list[int]:
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if the new index's result extends the right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next( i : int , z_result : list[int] , s : str ) -> bool:
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern( pattern : str , input_str : str ) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
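# Quick sketch of the Z-based counter above: "aba" occurs twice in "ababa"
# (at offsets 0 and 2), so find_pattern returns 2.
if __name__ == "__main__":
    assert find_pattern("aba", "ababa") == 2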
| 347
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''nat'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
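# Instantiation sketch (values mirror the defaults above and are illustrative):
#   config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   config.hidden_size  # -> 512, i.e. 64 * 2 ** (len(depths) - 1)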
| 347
| 1
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
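# Invocation sketch (script name and flag values are illustrative): spawn
# `_mp_fn` from a training script on 8 TPU cores:
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --script-arg value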
| 80
|
'''simple docstring'''
from __future__ import annotations
def resistor_parallel( resistors ) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series( resistors ) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
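# Worked example for the helpers above: 2 ohm and 4 ohm resistors give 6 ohm in
# series and 1 / (1/2 + 1/4) = 4/3 ohm in parallel.
if __name__ == "__main__":
    assert resistor_series([2, 4]) == 6
    assert abs(resistor_parallel([2, 4]) - 4 / 3) < 1e-9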
| 80
| 1
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''width_multiplier''' ) )
class MobileViTVaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , hidden_act="swish" , conv_kernel_size=3 , output_stride=32 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , width_multiplier=0.25 , ffn_dropout=0.0 , attn_dropout=0.0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier , divisor=8 )
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        """simple docstring"""
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        model = MobileViTVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='''MobileViTV2 does not output attentions''' )
    def test_attention_outputs( self ):
        """simple docstring"""
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
    def test_multi_gpu_data_parallel_forward( self ):
        """simple docstring"""
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_logits = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_logits , atol=1E-4 ) )
@slow
    def test_inference_semantic_segmentation( self ):
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_post_processing_semantic_segmentation( self ):
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 371
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent( user_agent = None ) -> str:
    '''simple docstring'''
    ua = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name( model_id , organization = None , token = None ) -> str:
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return F"""{username}/{model_id}"""
    else:
        return F"""{organization}/{model_id}"""
def create_model_card( args , model_name ):
    '''simple docstring'''
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )
    if hasattr(args , '''local_rank''' ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , '''hub_token''' ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''' ) else None , adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(args , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , '''README.md''' )
    model_card.save(card_path )
def extract_commit_hash( resolved_file , commit_hash = None ) -> Optional[str]:
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'''snapshots/([^/]+)/''' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache( old_cache_dir = None , new_cache_dir = None ) -> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant( weights_name , variant = None ) -> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
    return weights_name
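# Example of the variant helper above (file name and variant are illustrative):
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   -> "diffusion_pytorch_model.fp16.bin"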
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , FutureWarning , )
                return model_file
            except: # noqa: E722
                warnings.warn(
                    F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added.""" , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""" )
| 116
| 0
|
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel( metaclass=DummyObject ):
    _backends = ['onnx']
    def __init__( self, *args, **kwargs ):
        requires_backends(self, ['onnx'] )
    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ['onnx'] )
    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ['onnx'] )
| 119
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
    raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
    ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
    ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
    ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
    ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old_key , new_key ):
    val = dct.pop(old_key )
    dct[new_key] = val
def load_xsum_checkpoint( checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='cpu' )
    hub_interface = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
    hub_interface.model.load_state_dict(sd['model'] )
    return hub_interface
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint( checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load('pytorch/fairseq' , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('.' , '-' )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors='pt' ).unsqueeze(0 )
    if not torch.eq(tokens , tokensa ).all():
        raise ValueError(
            F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['model.shared.weight'] = state_dict['model.decoder.embed_tokens.weight']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict('mnli' , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0] # logits
    else: # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval() # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , 'lm_head' ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
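# Invocation sketch (script name and paths below are illustrative):
#   python convert_bart_checkpoint.py bart.large.cnn /tmp/bart-large-cnn --hf_config facebook/bart-large-cnn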
| 119
| 1
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path" , type=str , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=str , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=str , help="where to store parsed gold_data_path file" , )
    args = parser.parse_args()
    with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
        args.gold_data_path , "w" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n" )
            gold_file.write("\t".join(contexts ) + "\n" )
if __name__ == "__main__":
main()
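# Invocation sketch (file names are illustrative): writes one question per line
# to the evaluation set and tab-separated gold context titles per record:
#   python parse_dpr.py --src_path biencoder-nq-dev.json --evaluation_set eval.txt --gold_data_path gold.tsv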
| 351
|
'''simple docstring'''
def multiplicative_persistence( num ) -> int:
    '''simple docstring'''
    if not isinstance(num , int ):
        raise ValueError("multiplicative_persistence() only accepts integral values" )
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence( num ) -> int:
    '''simple docstring'''
    if not isinstance(num , int ):
        raise ValueError("additive_persistence() only accepts integral values" )
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
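# Worked example for the helpers above: 39 -> 27 -> 14 -> 4 takes three
# multiplicative steps, and 199 -> 19 -> 10 -> 1 takes three additive steps.
if __name__ == "__main__":
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(199) == 3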
| 174
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput( BaseOutput ):
    """simple docstring"""
    sample : torch.FloatTensor
class UNetaDModel( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , sample_size : int = 6_55_36 , sample_rate : Optional[int] = None , in_channels : int = 2 , out_channels : int = 2 , extra_in_channels : int = 0 , time_embedding_type : str = "fourier" , flip_sin_to_cos : bool = True , use_timestep_embedding : bool = False , freq_shift : float = 0.0 , down_block_types : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type : str = "UNetMidBlock1D" , out_block_type : str = None , block_out_channels : Tuple[int] = (32, 32, 64) , act_fn : str = None , norm_num_groups : int = 8 , layers_per_block : int = 1 , downsample_each_block : bool = False , ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample : torch.FloatTensor , timestep : Union[torch.Tensor, float, int] , return_dict : bool = True , ) -> Union[UNetaDOutput, Tuple]:
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample , res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample )
| 107
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def a_ ( self : int ):
"""simple docstring"""
return len(self.sp_model )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
"""simple docstring"""
A_ : str = self.__dict__.copy()
A_ : Tuple = None
return state
def __setstate__( self : Tuple , _lowerCamelCase : int ):
"""simple docstring"""
A_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : List[Any] = {}
A_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
if self.remove_space:
A_ : str = ''' '''.join(inputs.strip().split() )
else:
A_ : Any = inputs
A_ : List[str] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
A_ : Any = unicodedata.normalize('''NFKD''' , _lowerCamelCase )
A_ : List[str] = ''''''.join([c for c in outputs if not unicodedata.combining(_lowerCamelCase )] )
if self.do_lower_case:
A_ : str = outputs.lower()
return outputs
def a_ ( self : List[str] , _lowerCamelCase : str ):
"""simple docstring"""
A_ : str = self.preprocess_text(_lowerCamelCase )
A_ : int = self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
A_ : List[Any] = []
for piece in pieces:
if len(_lowerCamelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
A_ : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCamelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A_ : Tuple = cur_pieces[1:]
else:
A_ : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCamelCase )
else:
new_pieces.append(_lowerCamelCase )
return new_pieces
def a_ ( self : Any , _lowerCamelCase : List[Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(_lowerCamelCase )
def a_ ( self : Any , _lowerCamelCase : List[Any] ):
"""simple docstring"""
return self.sp_model.IdToPiece(_lowerCamelCase )
def a_ ( self : List[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Any = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip()
return out_string
def a_ ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : bool = False , _lowerCamelCase : bool = None , _lowerCamelCase : bool = True , **_lowerCamelCase : int , ):
"""simple docstring"""
A_ : int = kwargs.pop('''use_source_tokenizer''' , _lowerCamelCase )
A_ : List[str] = self.convert_ids_to_tokens(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : Any = []
A_ : List[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
A_ : int = []
sub_texts.append(_lowerCamelCase )
else:
current_sub_text.append(_lowerCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A_ : Optional[int] = ''''''.join(_lowerCamelCase )
A_ : Any = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Dict = self.clean_up_tokenization(_lowerCamelCase )
return clean_text
else:
return text
def a_ ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
A_ : Optional[int] = [self.sep_token_id]
A_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def a_ ( self : List[str] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1, 1]
return ([0] * len(_lowerCamelCase )) + [1, 1]
def a_ ( self : int , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
A_ : List[Any] = [self.sep_token_id]
A_ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def a_ ( self : int , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A_ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
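A short usage sketch for the tokenizer above (assumes the sentencepiece package and hub access):
from transformers import XLNetTokenizer
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
encoded = tokenizer("Hello world")
# XLNet appends <sep> and <cls> at the end instead of prepending them,
# which is also why the padding side defaults to "left" above.
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))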
| 167
| 0
|
from __future__ import annotations
lowerCamelCase_ : Optional[Any] = list[tuple[int, int]]
lowerCamelCase_ : int = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase_ : str = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class a__ :
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[Any]:
__a = pos_x
__a = pos_y
__a = (pos_y, pos_x)
__a = goal_x
__a = goal_y
__a = g_cost
__a = parent
__a = self.calculate_heuristic()
def __SCREAMING_SNAKE_CASE ( self ) -> float:
__a = abs(self.pos_x - self.goal_x )
__a = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , UpperCAmelCase ) -> bool:
return self.f_cost < other.f_cost
class a__ :
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Dict:
__a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCAmelCase )
__a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , UpperCAmelCase )
__a = [self.start]
__a = []
__a = False
def __SCREAMING_SNAKE_CASE ( self ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__a = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__a = True
return self.retrace_path(UpperCAmelCase )
self.closed_nodes.append(UpperCAmelCase )
__a = self.get_successors(UpperCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCAmelCase )
else:
# retrieve the best current path
__a = self.open_nodes.pop(self.open_nodes.index(UpperCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCAmelCase )
else:
self.open_nodes.append(UpperCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> list[Node]:
__a = []
for action in delta:
__a = parent.pos_x + action[1]
__a = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCAmelCase , UpperCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCAmelCase , ) )
return successors
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Path:
__a = node
__a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__a = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase_ : Any = (0, 0)
lowerCamelCase_ : Tuple = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase_ : Optional[int] = GreedyBestFirst(init, goal)
lowerCamelCase_ : List[Any] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase_ : str = 2
for elem in grid:
print(elem)
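For reference, the heuristic above is the Manhattan distance; a quick sanity check, using the constructor order and method name the snippet itself references:
# Node(pos_x, pos_y, goal_x, goal_y, g_cost, parent)
assert Node(0, 0, 3, 4, 0, None).calculate_heuristic() == 7  # |0 - 3| + |0 - 4|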
| 367
|
def lowerCAmelCase( __lowerCamelCase ):
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
__a = ''
while len(__lowerCamelCase ) % 3 != 0:
__a = '0' + bin_string
__a = [
bin_string[index : index + 3]
for index in range(len(__lowerCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__a = 0
for index, val in enumerate(__lowerCamelCase ):
oct_val += int(2 ** (2 - index) * int(__lowerCamelCase ) )
oct_string += str(__lowerCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
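A compact, independently verifiable reference for the same conversion (not part of the original snippet): pad the input to a multiple of three bits, then map each 3-bit group to one octal digit.
def bin_to_octal(bin_string: str) -> str:
    if not bin_string or any(c not in "01" for c in bin_string):
        raise ValueError("expected a non-empty binary string")
    while len(bin_string) % 3:
        bin_string = "0" + bin_string
    return "".join(str(int(bin_string[i : i + 3], 2)) for i in range(0, len(bin_string), 3))
assert bin_to_octal("1111100000") == "1740"  # 0b1111100000 == 992 == 0o1740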
| 197
| 0
|
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = [0 for i in range(len(_SCREAMING_SNAKE_CASE ) )]
# initialize interval's left pointer and right pointer
snake_case_ , snake_case_ = 0, 0
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
# case when current index is inside the interval
if i <= right_pointer:
snake_case_ = min(right_pointer - i + 1 , z_result[i - left_pointer] )
snake_case_ = min_edge
while go_next(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
z_result[i] += 1
# if the new index's result extends the interval further to the right,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
snake_case_ , snake_case_ = i, i + z_result[i] - 1
return z_result
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
return i + z_result[i] < len(_SCREAMING_SNAKE_CASE ) and s[z_result[i]] == s[i + z_result[i]]
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
snake_case_ = z_function(pattern + input_str )
for val in z_result:
# if the value is at least the length of the pattern string,
# this index is the starting position of a substring
# equal to the pattern string
if val >= len(_SCREAMING_SNAKE_CASE ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
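A quick, self-contained check of the technique (a fresh minimal z-function, since the definitions above shadow each other under the mangled names):
def z_array(s: str) -> list:
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z
assert z_array("abacaba") == [0, 0, 1, 0, 3, 0, 1]
# counting occurrences of "aba" in "abacaba" via the concatenation trick:
assert sum(v >= 3 for v in z_array("aba" + "abacaba")) == 2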
| 347
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
| 347
| 1
|
from torch import nn
def lowerCamelCase_ ( _a : Optional[Any] ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
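A self-contained sketch of the same factory with a dict lookup instead of the if/elif chain, which some codebases prefer:
from torch import nn
_ACTIVATIONS = {"swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU}
def get_activation(act_fn: str) -> nn.Module:
    try:
        return _ACTIVATIONS[act_fn]()
    except KeyError:
        raise ValueError(f"Unsupported activation function: {act_fn}") from None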
| 354
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = ["pixel_values"]
def __init__( self: Optional[Any] ,lowerCamelCase_: bool = True ,lowerCamelCase_: Optional[Dict[str, int]] = None ,lowerCamelCase_: PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase_: bool = True ,lowerCamelCase_: bool = True ,lowerCamelCase_: Union[int, float] = 1 / 255 ,lowerCamelCase_: Dict[str, int] = None ,lowerCamelCase_: bool = True ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,**lowerCamelCase_: Union[str, Any] ,) -> None:
super().__init__(**lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = size if size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase_ : Tuple = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ ,param_name="""crop_size""" )
UpperCAmelCase_ : Union[str, Any] = do_resize
UpperCAmelCase_ : Union[str, Any] = do_rescale
UpperCAmelCase_ : str = do_normalize
UpperCAmelCase_ : Optional[int] = do_center_crop
UpperCAmelCase_ : str = crop_size
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Any = resample
UpperCAmelCase_ : Tuple = rescale_factor
UpperCAmelCase_ : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ : str = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A__ ( self: List[Any] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Dict[str, int] ,lowerCamelCase_: PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: Optional[int] ,) -> np.ndarray:
UpperCAmelCase_ : Tuple = get_size_dict(lowerCamelCase_ )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(lowerCamelCase_ ,size=size["""shortest_edge"""] ,default_to_square=lowerCamelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase_ : Tuple = (size["""height"""], size["""width"""])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: List[Any] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Dict[str, int] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: str ,) -> np.ndarray:
UpperCAmelCase_ : Dict = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCamelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Optional[int] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: float ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: List[str] ) -> np.ndarray:
return rescale(lowerCamelCase_ ,scale=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Union[float, List[float]] ,lowerCamelCase_: Union[float, List[float]] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: Union[str, Any] ,) -> np.ndarray:
return normalize(lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Any ,lowerCamelCase_: ImageInput ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Dict[str, int] = None ,lowerCamelCase_: PILImageResampling = None ,lowerCamelCase_: bool = None ,lowerCamelCase_: int = None ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[float] = None ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: Optional[Union[str, TensorType]] = None ,lowerCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST ,**lowerCamelCase_: List[str] ,) -> BatchFeature:
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : str = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" ,default_to_square=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = resample if resample is not None else self.resample
UpperCAmelCase_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : List[str] = get_size_dict(lowerCamelCase_ )
if not is_batched(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = [images]
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Tuple = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
UpperCAmelCase_ : int = [self.resize(image=lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : Optional[int] = [self.center_crop(image=lowerCamelCase_ ,size=lowerCamelCase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : str = [self.rescale(image=lowerCamelCase_ ,scale=lowerCamelCase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : Dict = [self.normalize(image=lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ) for image in images]
UpperCAmelCase_ : Dict = [to_channel_dimension_format(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
UpperCAmelCase_ : Tuple = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase_ ,tensor_type=lowerCamelCase_ )
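A usage sketch for a processor with this interface (ViTImageProcessor is used as a stand-in here because it shares the same BaseImageProcessor API; the checkpoint-free constructor keeps it offline):
import numpy as np
from transformers import ViTImageProcessor
processor = ViTImageProcessor(size={"height": 224, "width": 224})
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)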
| 59
| 0
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _SCREAMING_SNAKE_CASE :
def __lowerCAmelCase ( self , __A ) -> int:
raise NotImplementedError()
def __lowerCAmelCase ( self ) -> Optional[Any]:
raise NotImplementedError()
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A = False , **__A ) -> int:
lowerCAmelCase_ :List[str] = tokenizer
lowerCAmelCase_ :Dict = skip_prompt
lowerCAmelCase_ :Union[str, Any] = decode_kwargs
# variables used in the streaming process
lowerCAmelCase_ :Any = []
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :List[Any] = True
def __lowerCAmelCase ( self , __A ) -> str:
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
lowerCAmelCase_ :Tuple = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowerCAmelCase_ :int = False
return
# Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
lowerCAmelCase_ :int = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
lowerCAmelCase_ :List[str] = text[self.print_len :]
lowerCAmelCase_ :Tuple = []
lowerCAmelCase_ :Tuple = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowerCAmelCase_ :Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowerCAmelCase_ :Tuple = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __lowerCAmelCase ( self ) -> Dict:
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
lowerCAmelCase_ :Optional[int] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
lowerCAmelCase_ :List[str] = text[self.print_len :]
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :int = 0
else:
lowerCAmelCase_ :Tuple = """"""
lowerCAmelCase_ :int = True
self.on_finalized_text(__A , stream_end=__A )
def __lowerCAmelCase ( self , __A , __A = False ) -> Optional[Any]:
print(__A , flush=__A , end="""""" if not stream_end else None )
def __lowerCAmelCase ( self , __A ) -> Dict:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A = False , __A = None , **__A ) -> Dict:
super().__init__(__A , __A , **__A )
lowerCAmelCase_ :Union[str, Any] = Queue()
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :List[Any] = timeout
def __lowerCAmelCase ( self , __A , __A = False ) -> List[str]:
self.text_queue.put(__A , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> Optional[Any]:
return self
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :int = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
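The iterator variant above is typically driven from a background generation thread; a usage sketch (gpt2 as an illustrative checkpoint):
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["The meaning of life is"], return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)
Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
for chunk in streamer:  # yields decoded text pieces as they are generated
    print(chunk, end="")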
| 84
|
def __UpperCamelCase ( _lowerCAmelCase = 100_0000 ) -> int:
"""simple docstring"""
A : str = limit + 1
A : Tuple = [0] * limit
for first_term in range(1 , _lowerCAmelCase ):
for n in range(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
A : Any = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 and a > d, also a < 4d
A : Optional[int] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowerCamelCase__ ( _lowerCAmelCase ):
'''simple docstring'''
_lowerCamelCase = "poolformer"
def __init__( self ,lowerCamelCase_=3 ,lowerCamelCase_=1_6 ,lowerCamelCase_=1_6 ,lowerCamelCase_=3 ,lowerCamelCase_=4.0 ,lowerCamelCase_=[2, 2, 6, 2] ,lowerCamelCase_=[6_4, 1_2_8, 3_2_0, 5_1_2] ,lowerCamelCase_=[7, 3, 3, 3] ,lowerCamelCase_=[4, 2, 2, 2] ,lowerCamelCase_=[2, 1, 1, 1] ,lowerCamelCase_=4 ,lowerCamelCase_=0.0 ,lowerCamelCase_="gelu" ,lowerCamelCase_=True ,lowerCamelCase_=1E-5 ,lowerCamelCase_=0.02 ,**lowerCamelCase_ ,) -> Dict:
A = num_channels
A = patch_size
A = stride
A = padding
A = pool_size
A = hidden_sizes
A = mlp_ratio
A = depths
A = patch_sizes
A = strides
A = num_encoder_blocks
A = drop_path_rate
A = hidden_act
A = use_layer_scale
A = layer_scale_init_value
A = initializer_range
super().__init__(**_lowercase )
class lowerCamelCase__ ( _lowerCAmelCase ):
'''simple docstring'''
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCamelCase__ ( self ) -> Any:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase__ ( self ) -> List[Any]:
return 2E-3
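A quick instantiation sketch for the configuration above:
from transformers import PoolFormerConfig
config = PoolFormerConfig(num_encoder_blocks=4, hidden_sizes=[64, 128, 320, 512])
print(config.model_type)  # "poolformer"
print(config.depths)      # [2, 2, 6, 2]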
| 369
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase = Features({'''text''': Value('''string''' )} )
_lowerCamelCase = Features({'''labels''': ClassLabel} )
_lowerCamelCase = "text"
_lowerCamelCase = "labels"
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Dict:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] ,lowerCamelCase_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
A = copy.deepcopy(self )
A = self.label_schema.copy()
A = features[self.label_column]
A = label_schema
return task_template
@property
def UpperCamelCase__ ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 77
| 0
|
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowerCAmelCase__ = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
lowerCAmelCase__ = '''hopper-medium-v2'''
lowerCAmelCase__ = gym.make(env_name)
lowerCAmelCase__ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
lowerCAmelCase__ = env.reset()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1_000
lowerCAmelCase__ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowerCAmelCase__ = pipeline(obs, planning_horizon=32)
# execute action in environment
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = env.step(denorm_actions)
lowerCAmelCase__ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
lowerCAmelCase__ = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 108
|
'''simple docstring'''
from collections.abc import Generator
def __magic_name__( ):
__lowerCAmelCase , __lowerCAmelCase = 0, 1
while True:
__lowerCAmelCase , __lowerCAmelCase = b, a + b
yield b
def __magic_name__( lowerCamelCase = 1_0_0_0):
__lowerCAmelCase = 1
__lowerCAmelCase = fibonacci_generator()
while len(str(next(lowerCamelCase))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
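A worked check: the generator yields 1, 2, 3, 5, 8, ..., and 144 = F(12) is the first term with three digits, so solution(3) is intended to return 12 (ten yielded terms are shorter than three digits; the counter starts at 1 and the return adds 1, giving 1 + 10 + 1 = 12).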
| 174
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class _snake_case ( lowercase__):
UpperCamelCase__ : Optional[Any] ="""transfo-xl"""
UpperCamelCase__ : Dict =["""mems"""]
UpperCamelCase__ : Optional[int] ={
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[Any], __lowercase : Optional[Any]=26_7735, __lowercase : int=[2_0000, 4_0000, 20_0000], __lowercase : Union[str, Any]=1024, __lowercase : Tuple=1024, __lowercase : Tuple=16, __lowercase : Optional[Any]=64, __lowercase : str=4096, __lowercase : Optional[int]=4, __lowercase : Union[str, Any]=False, __lowercase : Union[str, Any]=18, __lowercase : List[str]=1600, __lowercase : List[Any]=1000, __lowercase : Union[str, Any]=True, __lowercase : Tuple=True, __lowercase : Optional[Any]=0, __lowercase : List[str]=-1, __lowercase : int=True, __lowercase : Dict=0.1, __lowercase : Union[str, Any]=0.0, __lowercase : str=True, __lowercase : Optional[Any]="normal", __lowercase : str=0.01, __lowercase : Tuple=0.01, __lowercase : List[Any]=0.02, __lowercase : Any=1e-5, __lowercase : Union[str, Any]=0, **__lowercase : Union[str, Any], ):
lowercase__ = vocab_size
lowercase__ = []
self.cutoffs.extend(__lowercase )
if proj_share_all_but_first:
lowercase__ = [False] + [True] * len(self.cutoffs )
else:
lowercase__ = [False] + [False] * len(self.cutoffs )
lowercase__ = d_model
lowercase__ = d_embed
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = div_val
lowercase__ = pre_lnorm
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = mem_len
lowercase__ = same_length
lowercase__ = attn_type
lowercase__ = clamp_len
lowercase__ = sample_softmax
lowercase__ = adaptive
lowercase__ = dropout
lowercase__ = dropatt
lowercase__ = untie_r
lowercase__ = init
lowercase__ = init_range
lowercase__ = proj_init_std
lowercase__ = init_std
lowercase__ = layer_norm_epsilon
super().__init__(eos_token_id=__lowercase, **__lowercase )
@property
def A__ ( self : Optional[Any] ):
# Message copied from Transformer-XL documentation
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def A__ ( self : List[str], __lowercase : Union[str, Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
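An instantiation sketch showing the cutoff/projection bookkeeping above (attribute names per the transformers API this snippet mirrors):
from transformers import TransfoXLConfig
config = TransfoXLConfig(cutoffs=[20000, 40000, 200000])
print(config.cutoffs)    # [20000, 40000, 200000]
print(config.tie_projs)  # [False, True, True, True] because proj_share_all_but_first defaults to True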
| 366
|
from pathlib import Path
import fire
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = Path(SCREAMING_SNAKE_CASE_ )
lowercase__ = Path(SCREAMING_SNAKE_CASE_ )
dest_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
for path in src_dir.iterdir():
lowercase__ = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowercase__ = dest_dir.joinpath(path.name )
print(SCREAMING_SNAKE_CASE_ )
dest_path.open("w" ).write("\n".join(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
fire.Fire(minify)
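Invocation sketch: fire exposes the function's positional parameters as CLI arguments, e.g.
# python minify.py SRC_DIR DEST_DIR 100
# copies the first 100 lines of every file in SRC_DIR into DEST_DIR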
| 224
| 0
|
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(lowerCAmelCase__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(lowerCAmelCase__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
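A worked example of the in-place DP, written as a fresh runnable copy so the expected answer can be checked directly:
def min_path_sum(matrix):
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
# cheapest top-left -> bottom-right path moving only right/down: 1+3+1+1+1 = 7
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7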
| 259
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
__lowerCAmelCase : str =list[list[float | int]]
def UpperCAmelCase__ ( lowerCAmelCase__ :Matrix , lowerCAmelCase__ :Matrix ) -> Matrix:
'''simple docstring'''
lowercase = len(lowerCAmelCase__ )
lowercase = [[0 for _ in range(size + 1 )] for _ in range(lowerCAmelCase__ )]
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
for row in range(lowerCAmelCase__ ):
for col in range(lowerCAmelCase__ ):
lowercase = matrix[row][col]
lowercase = vector[row][0]
lowercase = 0
lowercase = 0
while row < size and col < size:
# pivoting
lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowerCAmelCase__ , lowerCAmelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowercase , lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowerCAmelCase__ ):
lowercase = augmented[rowa][col] / augmented[row][col]
lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowerCAmelCase__ ):
for row in range(lowerCAmelCase__ ):
lowercase = augmented[row][col] / augmented[col][col]
for cola in range(lowerCAmelCase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 1_0 )] for row in range(lowerCAmelCase__ )
]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[int] ) -> Callable[[int], int]:
'''simple docstring'''
lowercase = len(lowerCAmelCase__ )
lowercase = [[0 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
lowercase = [[0] for _ in range(lowerCAmelCase__ )]
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
for x_val, y_val in enumerate(lowerCAmelCase__ ):
for col in range(lowerCAmelCase__ ):
lowercase = (x_val + 1) ** (size - col - 1)
lowercase = y_val
lowercase = solve(lowerCAmelCase__ , lowerCAmelCase__ )
def interpolated_func(lowerCAmelCase__ :int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowerCAmelCase__ ) )
return interpolated_func
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def UpperCAmelCase__ ( lowerCAmelCase__ :Callable[[int], int] = question_function , lowerCAmelCase__ :int = 1_0 ) -> int:
'''simple docstring'''
lowercase = [func(lowerCAmelCase__ ) for x_val in range(1 , order + 1 )]
lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowercase = 0
lowercase = 42
lowercase = 42
for poly in polynomials:
lowercase = 1
while func(lowerCAmelCase__ ) == poly(lowerCAmelCase__ ):
x_val += 1
ret += poly(lowerCAmelCase__ )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 197
| 0
|
from __future__ import annotations
UpperCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__ ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[str] = graph
# mapping each node to its parent in the resulting breadth-first tree
UpperCamelCase__ : Union[str, Any] = {}
UpperCamelCase__ : int = source_vertex
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = {self.source_vertex}
UpperCamelCase__ : Tuple = None
UpperCamelCase__ : Any = [self.source_vertex] # first in first out queue
while queue:
UpperCamelCase__ : List[str] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase_ )
UpperCamelCase__ : Optional[int] = vertex
queue.append(UpperCamelCase_ )
def UpperCamelCase__ ( self, __magic_name__ ) -> int:
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCamelCase__ : str = self.parent.get(UpperCamelCase_ )
if target_vertex_parent is None:
UpperCamelCase__ : Optional[Any] = (
f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(UpperCamelCase_ )
return self.shortest_path(UpperCamelCase_ ) + f"->{target_vertex}"
if __name__ == "__main__":
UpperCAmelCase_ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
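Expected output of the driver above (dict insertion order makes the BFS deterministic): the first print yields G->C->A->B->D, the second yields G (source equals target), and the third raises ValueError because 'Foo' is not reachable from 'G'.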
| 357
|
from __future__ import annotations
import bisect
def lowerCAmelCase_ ( __UpperCAmelCase: list[int] , __UpperCAmelCase: int , __UpperCAmelCase: int = 0 , __UpperCAmelCase: int = -1 ) -> int:
if hi < 0:
UpperCamelCase__ : Union[str, Any] = len(__UpperCAmelCase )
while lo < hi:
UpperCamelCase__ : Optional[Any] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
UpperCamelCase__ : Optional[int] = mid + 1
else:
UpperCamelCase__ : Tuple = mid
return lo
def lowerCAmelCase_ ( __UpperCAmelCase: list[int] , __UpperCAmelCase: int , __UpperCAmelCase: int = 0 , __UpperCAmelCase: int = -1 ) -> int:
if hi < 0:
UpperCamelCase__ : int = len(__UpperCAmelCase )
while lo < hi:
UpperCamelCase__ : List[Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
UpperCamelCase__ : Optional[int] = mid + 1
else:
UpperCamelCase__ : int = mid
return lo
def lowerCAmelCase_ ( __UpperCAmelCase: list[int] , __UpperCAmelCase: int , __UpperCAmelCase: int = 0 , __UpperCAmelCase: int = -1 ) -> None:
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def lowerCAmelCase_ ( __UpperCAmelCase: list[int] , __UpperCAmelCase: int , __UpperCAmelCase: int = 0 , __UpperCAmelCase: int = -1 ) -> None:
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def lowerCAmelCase_ ( __UpperCAmelCase: list[int] , __UpperCAmelCase: int ) -> int | None:
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : List[str] = len(__UpperCAmelCase ) - 1
while left <= right:
UpperCamelCase__ : List[str] = left + (right - left) // 2
UpperCamelCase__ : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
UpperCamelCase__ : List[str] = midpoint - 1
else:
UpperCamelCase__ : List[str] = midpoint + 1
return None
def lowerCAmelCase_ ( __UpperCAmelCase: list[int] , __UpperCAmelCase: int ) -> int | None:
UpperCamelCase__ : Union[str, Any] = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def lowerCAmelCase_ ( __UpperCAmelCase: list[int] , __UpperCAmelCase: int , __UpperCAmelCase: int , __UpperCAmelCase: int ) -> int | None:
if right < left:
return None
UpperCamelCase__ : Optional[int] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by comma:\n').strip()
UpperCAmelCase_ = sorted(int(item) for item in user_input.split(','))
UpperCAmelCase_ = int(input('Enter a single number to be found in the list:\n'))
UpperCAmelCase_ = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
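Quick checks for the search variants, using the names the snippet itself references:
assert binary_search([0, 5, 7, 10, 15], 15) == 4
assert binary_search([0, 5, 7, 10, 15], 6) is None
assert binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4) == 1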
| 247
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 59
| 0
|
from __future__ import annotations
def lowerCamelCase__ ( a__ : list , a__ : int ) -> Optional[Any]:
# Checks if the entire collection has been sorted
if len(a__ ) <= 1 or n <= 1:
return
insert_next(a__ , n - 1 )
rec_insertion_sort(a__ , n - 1 )
def lowerCamelCase__ ( a__ : list , a__ : int ) -> Optional[int]:
# Checks order between adjacent elements
if index >= len(a__ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
UpperCamelCase_ , UpperCamelCase_ = (
collection[index],
collection[index - 1],
)
insert_next(a__ , index + 1 )
if __name__ == "__main__":
_A = input('''Enter integers separated by spaces: ''')
_A = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
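Intended usage, matching the driver above:
data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
# the list is sorted in place: [1, 2, 3, 4, 5]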
| 261
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 261
| 1
|
"""simple docstring"""
def _snake_case ( lowercase__ : dict ) -> set:
'''simple docstring'''
lowerCAmelCase_ :Any = set()
# edges = list of graph's edges
lowerCAmelCase_ :List[Any] = get_edges(lowercase__ )
# While there are still elements in the edges list, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices, and then
# remove all edges adjacent to from_node and to_node
while edges:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = edges.pop()
chosen_vertices.add(lowercase__ )
chosen_vertices.add(lowercase__ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase__ )
return chosen_vertices
def _snake_case ( lowercase__ : dict ) -> set:
'''simple docstring'''
lowerCAmelCase_ :Dict = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 84
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}
    )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word masking in Chinese."}
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word masking."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
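

# Typical invocation (paths and hyperparameters below are hypothetical, shown
# only to illustrate how the argument dataclasses above map to CLI flags):
# python run_language_modeling.py --model_name_or_path gpt2 \
#     --train_data_file train.txt --do_train --output_dir ./lm_output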
def longest_distance(graph: dict) -> None:
    """Print the number of vertices on a longest path in a DAG (Kahn's topological order)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
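# For the graph above the longest path visits 5 vertices
# (e.g. 0 -> 2 -> 5 -> 6 -> 7), so this prints 5.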
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only steps when the wrapped optimizers actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
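

# Minimal usage sketch (assuming `scheduler` and `optimizer` are the objects
# returned by `Accelerator.prepare`, which wraps schedulers in this class):
# lr_scheduler = AcceleratedScheduler(scheduler, optimizer)
# lr_scheduler.step()  # advances only on iterations where the optimizer stepped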
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` occurs in the sorted list `a_list`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
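

# Worked example: binary_search([1, 3, 5, 7], 5) hits the midpoint immediately
# and returns True; binary_search([1, 3, 5, 7], 4) recurses into [1, 3], then
# into the empty slice, and returns False.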
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(f'{target} was {not_str}found in {sequence}')
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
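

# Hypothetical usage sketch: aligning the template with a dataset's features
# so that the `labels` column carries that dataset's own ClassLabel:
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)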
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: exactly one of the three arguments must be 0, and it is solved for."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
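
# Worked example: with voltage=10 V and current=5 A, the unknown resistance is
# ohms_law(voltage=10, current=5, resistance=0) -> {'resistance': 2.0}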
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Bundles a TVLT image processor and feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
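

# Hypothetical usage sketch (`image_processor`, `feature_extractor`, `frames`
# and `waveform` are placeholders, not values defined in this file):
# processor = TvltProcessor(image_processor, feature_extractor)
# inputs = processor(images=frames, audio=waveform, sampling_rate=44100)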
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (preemptive shortest job first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn-around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turn-around times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('Enter how many process you want to analyze')
_UpperCAmelCase = int(input())
_UpperCAmelCase = [0] * no_of_processes
_UpperCAmelCase = [0] * no_of_processes
_UpperCAmelCase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
_UpperCAmelCase , _UpperCAmelCase = map(int, input().split())
_UpperCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_UpperCAmelCase = burst_time
_UpperCAmelCase = no_of_processes
_UpperCAmelCase = waiting_time
_UpperCAmelCase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_UpperCAmelCase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes (products of exactly two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
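

# Hand-checked sanity case: the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so solution(30) == 10.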
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
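

# Design note: drawing the pivot uniformly at random keeps the expected running
# time at O(n log n) even on already-sorted input, where always picking the
# leftmost element would degrade quicksort to O(n^2).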
def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer, based on SentencePiece with a reduced monolingual vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models

    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
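# Usage sketch (hypothetical paths; the dump dir must contain the fairseq dicts and
# bpecodes next to the checkpoint, as the --fsmt_checkpoint_path help text requires):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /path/to/wmt19.ru-en/model4.pt \
#       --pytorch_dump_folder_path /path/to/fsmt-wmt19-ru-en
#
# The dump folder can then be loaded with FSMTForConditionalGeneration.from_pretrained(...).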
| 316
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 252
|
def euclidean_distance_sqr(point1, point2):
    '''simple docstring'''
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort(array, column=0):
    '''simple docstring'''
    return sorted(array, key=lambda x: x[column])
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    '''simple docstring'''
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    '''simple docstring'''
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    '''simple docstring'''
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)
def closest_pair_of_points(points, points_counts):
    '''simple docstring'''
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 252
| 1
|
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because a repeated
        # value would mean a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')
    print(len(boards), 'solutions were found.')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
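# For n = 4 the search finds exactly 2 solutions; each board is printed row by row,
# e.g. [1, 3, 0, 2] renders as:
#   . Q . .
#   . . . Q
#   Q . . .
#   . . Q .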
| 289
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : List[str] = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class OpenLlamaConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "open-llama"
    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # note: the misspelled kwarg key below matches the upstream API and is kept so
        # that older callers passing it still work
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 289
| 1
|
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                F' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`')
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
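# Usage sketch: retire a keyword argument while still honouring callers that pass it
# (`num_steps` is a hypothetical argument name):
#
#   def my_fn(**kwargs):
#       num_steps = deprecate("num_steps", "0.30.0", "Use `steps` instead.", take_from=kwargs)
#
# Calling my_fn(num_steps=5) pops the value, emits a FutureWarning and returns 5.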
| 223
|
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = current_set.copy()
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
if len(A_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_lowerCamelCase : Dict = len(A_ ) + 1
if any(len(A_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(A_, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(A_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowerCamelCase : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
_lowerCamelCase : str = data_set.copy()
_lowerCamelCase : List[Any] = []
for row_index, row in enumerate(A_ ):
if 0 not in row:
_lowerCamelCase : Union[str, Any] = data_set.pop(A_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, A_ )
_lowerCamelCase : List[str] = data_set.copy()
_lowerCamelCase : int = simplify(A_ )
_lowerCamelCase : int = simplified[::-1]
_lowerCamelCase : list = []
for row in simplified:
_lowerCamelCase : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A_ ) == 0:
solutions.append(0 )
continue
_lowerCamelCase : Tuple = temp_row[1::]
_lowerCamelCase : Tuple = temp_row[::-1]
for column_index, column in enumerate(A_ ):
current_solution -= column * solutions[column_index]
solutions.append(A_ )
_lowerCamelCase : Optional[int] = []
for item in solutions:
final.append(float(round(A_, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
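# Sanity check, independent of the implementation: each equation above reads
# (x1 + ... + x5) + x_i = b_i with b = [4, 5, 6, 7, 8]; summing all five gives
# 6 * (x1 + ... + x5) = 30, so the exact solution is x_i = b_i - 5, i.e.
# [-1.0, 0.0, 1.0, 2.0, 3.0]; the second call, [[4, 2]], solves to [0.5].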
| 72
| 0
|
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , ):
__lowerCAmelCase : Union[str, Any] = cipher_alphabet or [chr(_UpperCamelCase ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
__lowerCAmelCase : Optional[int] = {
'a': 0.08497,
'b': 0.01492,
'c': 0.02202,
'd': 0.04253,
'e': 0.11162,
'f': 0.02228,
'g': 0.02015,
'h': 0.06094,
'i': 0.07546,
'j': 0.00153,
'k': 0.01292,
'l': 0.04025,
'm': 0.02406,
'n': 0.06749,
'o': 0.07507,
'p': 0.01929,
'q': 0.00095,
'r': 0.07587,
's': 0.06327,
't': 0.09356,
'u': 0.02758,
'v': 0.00978,
'w': 0.02560,
'x': 0.00150,
'y': 0.01994,
'z': 0.00077,
}
else:
# Custom frequencies dictionary
__lowerCAmelCase : Union[str, Any] = frequencies_dict
if not case_sensitive:
__lowerCAmelCase : int = ciphertext.lower()
# Chi squared statistic values
__lowerCAmelCase : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(_UpperCamelCase ) ):
__lowerCAmelCase : str = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
__lowerCAmelCase : int = (alphabet_letters.index(letter.lower() ) - shift) % len(
_UpperCamelCase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
__lowerCAmelCase : Tuple = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
__lowerCAmelCase : List[str] = letter.lower()
if letter in frequencies:
# Get the number of times the letter occurs in the message
__lowerCAmelCase : str = decrypted_with_shift.lower().count(_UpperCamelCase )
# Get the expected number of times the letter should appear based
# on letter frequencies
__lowerCAmelCase : Optional[Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__lowerCAmelCase : List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the number of times the letter occurs in the message
__lowerCAmelCase : Union[str, Any] = decrypted_with_shift.count(_UpperCamelCase )
# Get the expected number of times the letter should appear based
# on letter frequencies
__lowerCAmelCase : List[str] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__lowerCAmelCase : Optional[int] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
__lowerCAmelCase : List[str] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_UpperCamelCase ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
__lowerCAmelCase : int = min(
_UpperCamelCase , key=_UpperCamelCase , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) : int = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
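# A note on the scoring loop above: it sets expected = frequencies[letter] * occurrences
# (the letter's own count) rather than frequencies[letter] * len(message) as in the
# textbook chi-squared test, so the values rank shifts consistently but are not true
# chi-squared statistics.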
| 182
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : str = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
__lowerCAmelCase : Union[str, Any] = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
__lowerCAmelCase : List[str] = F"{src_lang}-{tgt_lang}"
__lowerCAmelCase : Tuple = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
__lowerCAmelCase : Any = os.path.join(_UpperCamelCase , 'README.md' )
print(F"Generating {path}" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split("""-""")
lowerCamelCase__ = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 182
| 1
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowercase : int = logging.get_logger(__name__)
lowercase : List[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowercase : Dict = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]) -> int:
'''simple docstring'''
for attribute in key.split("."):
__UpperCamelCase : Dict = getattr(_lowerCamelCase , _lowerCamelCase)
if weight_type is not None:
__UpperCamelCase : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase).shape
else:
__UpperCamelCase : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
__UpperCamelCase : Any = value
elif weight_type == "weight_g":
__UpperCamelCase : List[str] = value
elif weight_type == "weight_v":
__UpperCamelCase : Any = value
elif weight_type == "bias":
__UpperCamelCase : int = value
else:
__UpperCamelCase : Tuple = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : str) -> str:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = []
__UpperCamelCase : Tuple = fairseq_model.state_dict()
__UpperCamelCase : Union[str, Any] = hf_model.feature_extractor
__UpperCamelCase : Optional[int] = hf_model.adapter
for name, value in fairseq_dict.items():
__UpperCamelCase : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
__UpperCamelCase : Optional[Any] = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
load_adapter(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
__UpperCamelCase : List[Any] = True
if "*" in mapped_key:
__UpperCamelCase : List[Any] = name.split(_lowerCamelCase)[0].split(".")[-2]
__UpperCamelCase : Any = mapped_key.replace("*" , _lowerCamelCase)
if "weight_g" in name:
__UpperCamelCase : Union[str, Any] = "weight_g"
elif "weight_v" in name:
__UpperCamelCase : Optional[Any] = "weight_v"
elif "bias" in name:
__UpperCamelCase : Tuple = "bias"
elif "weight" in name:
__UpperCamelCase : Optional[int] = "weight"
else:
__UpperCamelCase : Union[str, Any] = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
continue
if not is_used:
unused_weights.append(_lowerCamelCase)
logger.warning(F'Unused weights: {unused_weights}')
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : str = full_name.split("conv_layers.")[-1]
__UpperCamelCase : Any = name.split(".")
__UpperCamelCase : List[Any] = int(items[0])
__UpperCamelCase : Union[str, Any] = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__UpperCamelCase : Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__UpperCamelCase : List[str] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__UpperCamelCase : Any = value
logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
__UpperCamelCase : Any = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(_lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Optional[int] = full_name.split("adaptor.")[-1]
__UpperCamelCase : Tuple = name.split(".")
if items[1].isdigit():
__UpperCamelCase : str = int(items[1])
else:
__UpperCamelCase : Tuple = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
__UpperCamelCase : int = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.')
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
__UpperCamelCase : Union[str, Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
__UpperCamelCase : Any = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.')
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
__UpperCamelCase : Optional[int] = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.')
elif isinstance(_lowerCamelCase , _lowerCamelCase):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
__UpperCamelCase : Optional[Any] = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.')
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
__UpperCamelCase : Any = value
logger.info(F'Adapter layer {layer_id} weight was initialized from {full_name}.')
else:
unused_weights.append(_lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> Any:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Any = emb.weight.shape
__UpperCamelCase : Union[str, Any] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
__UpperCamelCase : int = emb.weight.data
return lin_layer
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Tuple , ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Dict = WavaVecaConfig.from_pretrained(
_lowerCamelCase , add_adapter=_lowerCamelCase , adapter_stride=_lowerCamelCase , adapter_kernel_size=_lowerCamelCase , use_auth_token=_lowerCamelCase , output_hidden_size=_lowerCamelCase , )
__UpperCamelCase : Optional[Any] = MBartConfig.from_pretrained(_lowerCamelCase)
# load model
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/")[:-1]),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
__UpperCamelCase : List[str] = model[0].eval()
# load feature extractor
__UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase , use_auth_token=_lowerCamelCase)
# set weights for wav2vec2 encoder
__UpperCamelCase : Optional[Any] = WavaVecaModel(_lowerCamelCase)
recursively_load_weights_wavaveca(model.encoder , _lowerCamelCase)
# load decoder weights
__UpperCamelCase : int = MBartForCausalLM(_lowerCamelCase)
__UpperCamelCase , __UpperCamelCase : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCamelCase)
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}')
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')
__UpperCamelCase : Optional[int] = SpeechEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase)
__UpperCamelCase : Dict = False
__UpperCamelCase : Any = MBartaaTokenizer(_lowerCamelCase)
tokenizer.save_pretrained(_lowerCamelCase)
__UpperCamelCase : Optional[int] = hf_wavavec.config.to_dict()
__UpperCamelCase : str = tokenizer.pad_token_id
__UpperCamelCase : Dict = tokenizer.bos_token_id
__UpperCamelCase : int = tokenizer.eos_token_id
__UpperCamelCase : int = "mbart50"
__UpperCamelCase : int = "wav2vec2"
__UpperCamelCase : Tuple = tokenizer.eos_token_id
__UpperCamelCase : Any = 250_004
__UpperCamelCase : Tuple = tokenizer.eos_token_id
__UpperCamelCase : Tuple = SpeechEncoderDecoderConfig.from_dict(_lowerCamelCase)
hf_wavavec.save_pretrained(_lowerCamelCase)
feature_extractor.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250004, type=int, help='`decoder_start_token_id` of model config')
lowercase : Union[str, Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
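# Example invocation (hypothetical paths and script name; requires fairseq plus the
# original checkpoint, its config yaml and the mbart dict, as wired up by argparse above):
#
#   python convert_wav2vec2_mbart50_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.mbart50.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path /path/to/wav2vec2-xls-r-mbart50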
| 232
|
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase : Optional[Any] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowercase : str = 'zero2'
lowercase : Optional[int] = 'zero3'
lowercase : Optional[Any] = [ZEROa, ZEROa]
def parameterized_custom_name_func(func, param_num, param):
    '''simple docstring'''
    # customize the test name generator so both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
lowercase : List[str] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
@parameterized.expand(a , name_func=a )
def _lowerCamelCase ( self :Dict , a :Optional[Any] , a :str ) -> Optional[int]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@require_torch_multi_gpu
@parameterized.expand(a , name_func=a )
def _lowerCamelCase ( self :List[str] , a :str , a :str ) -> List[Any]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@parameterized.expand(a , name_func=a )
def _lowerCamelCase ( self :List[Any] , a :List[str] , a :int ) -> Optional[int]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@require_torch_multi_gpu
@parameterized.expand(a , name_func=a )
def _lowerCamelCase ( self :List[str] , a :List[Any] , a :Dict ) -> int:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
def _lowerCamelCase ( self :Any , a :List[str] ) -> Optional[Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def _lowerCamelCase ( self :Optional[Any] , a :str , a :str , a :int = 1_0 , a :bool = True , a :bool = True , a :bool = True , ) -> Any:
__UpperCamelCase : Optional[Any] = models[model]
__UpperCamelCase : List[Any] = self.run_trainer(
stage=a , model_name=a , eval_steps=a , num_train_epochs=1 , distributed=a , fpaa=a , )
self.do_checks(a )
return output_dir
def _lowerCamelCase ( self :List[str] , a :str , a :str , a :int = 1_0 , a :int = 1 , a :bool = True , a :bool = True , ) -> Dict:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir("./xxx" , after=a )
__UpperCamelCase : int = f'\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(a )}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            '.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__UpperCamelCase : Dict = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__UpperCamelCase : int = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__UpperCamelCase : Optional[Any] = self.get_launcher(a )
__UpperCamelCase : Optional[int] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(a , env=self.get_env() )
return output_dir
def _lowerCamelCase ( self :Any , a :List[Any]=False ) -> List[Any]:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
__UpperCamelCase : List[Any] = min(2 , get_gpu_count() ) if distributed else 1
return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 232
| 1
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class __magic_name__ ( lowerCAmelCase_ ):
@add_start_docstrings(__snake_case )
def __call__( self , __snake_case , __snake_case , **__snake_case ) -> bool:
'''simple docstring'''
raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case = None ) -> int:
'''simple docstring'''
__a =max_length
__a =max_position_embeddings
@add_start_docstrings(__snake_case )
def __call__( self , __snake_case , __snake_case , **__snake_case ) -> bool:
'''simple docstring'''
__a =input_ids.shape[-1]
__a =cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
'exceptions, performance degradation, or nothing at all.' )
return is_done
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
warnings.warn(
'The class `MaxNewTokensCriteria` is deprecated. '
f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
'with `max_length = start_length + max_new_tokens` instead.' , __snake_case , )
__a =start_length
__a =max_new_tokens
__a =start_length + max_new_tokens
@add_start_docstrings(__snake_case )
def __call__( self , __snake_case , __snake_case , **__snake_case ) -> bool:
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case = None ) -> int:
'''simple docstring'''
__a =max_time
__a =time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__snake_case )
def __call__( self , __snake_case , __snake_case , **__snake_case ) -> bool:
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class __magic_name__ ( lowerCAmelCase_ ):
@add_start_docstrings(__snake_case )
def __call__( self , __snake_case , __snake_case , **__snake_case ) -> bool:
'''simple docstring'''
return any(criteria(__snake_case , __snake_case ) for criteria in self )
@property
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
for stopping_criterium in self:
if isinstance(__snake_case , __snake_case ):
return stopping_criterium.max_length
elif isinstance(__snake_case , __snake_case ):
return stopping_criterium.max_length
return None
def UpperCamelCase_( _snake_case : StoppingCriteriaList , _snake_case : int ):
"""simple docstring"""
__a =stopping_criteria.max_length
__a =deepcopy(_snake_case )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , _snake_case )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_snake_case ) )
return new_stopping_criteria
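# Usage sketch using the public transformers names that these renamed classes mirror
# (StoppingCriteriaList / MaxLengthCriteria); treat the names as the intended API,
# not the literal definitions above:
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   criteria(input_ids, scores)  # True once input_ids.shape[-1] >= 20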
| 359
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = 'swin2sr'
    attribute_map = {
        'hidden_size': 'embed_dim',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
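# Minimal usage sketch (illustrative values, not a released checkpoint):
#
#   config = Swin2SRConfig(upscale=4)
#   assert config.num_layers == len(config.depths)  # derived field set in __init__
#
# attribute_map lets generic code read config.hidden_size as an alias of embed_dim.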
| 308
| 0
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.')
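# Quick check of the 2x2 branch: inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]]) returns
# [[0.6, -0.7], [-0.2, 0.4]], since the determinant is 4 * 6 - 2 * 7 = 10.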
| 290
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase : List[str] = TypeVar("KEY")
UpperCamelCase : List[str] = TypeVar("VAL")
@dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowercase = 42
lowercase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
UpperCamelCase : Any = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ):
'''simple docstring'''
__UpperCamelCase = initial_block_size
__UpperCamelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCamelCase = capacity_factor
__UpperCamelCase = 0
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets[ind]
if not stored:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
__UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._buckets
__UpperCamelCase = [None] * new_size
__UpperCamelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCamelCase = self._get_next_ind(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
__UpperCamelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
__UpperCamelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCamelCase = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
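# Usage sketch of the open-addressing map above (the class name is obfuscated here;
# upstream it is HashMap(MutableMapping) with linear probing):
#
#   hm = HashMap()
#   hm["key"] = 1    # probes via hash(key) % len(buckets), resizing at 75% load
#   del hm["key"]    # the slot becomes the _deleted sentinel so later probes keep walking
#   "key" in hm      # False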
| 316
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
A_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize so the shortest edge matches size["shortest_edge"], preserving the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
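
# A short usage sketch for the processor above (illustrative; assumes PIL is
# installed). With the defaults, any RGB image is resized so its shortest edge
# is 224, center-cropped to 224x224, rescaled by 1/255 and normalized with the
# OPENAI_CLIP mean/std:
def _demo_clip_preprocess():
    image = PIL.Image.new("RGB", (640, 480))
    processor = CLIPImageProcessor()
    batch = processor.preprocess(image, return_tensors="np")
    assert batch["pixel_values"].shape == (1, 3, 224, 224)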
| 359
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : List[Any]=13 , snake_case : Union[str, Any]=7 , snake_case : Optional[Any]=True , snake_case : str=True , snake_case : Dict=False , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Tuple=5 , snake_case : List[str]=4 , snake_case : Optional[int]=37 , snake_case : str="gelu" , snake_case : Tuple=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=512 , snake_case : List[str]=16 , snake_case : str=2 , snake_case : Optional[int]=0.02 , snake_case : str=3 , snake_case : Dict=4 , snake_case : Optional[Any]=None , ):
'''simple docstring'''
A__ : int = parent
A__ : Union[str, Any] = batch_size
A__ : Optional[int] = seq_length
A__ : List[Any] = is_training
A__ : List[str] = use_input_mask
A__ : Optional[Any] = use_token_type_ids
A__ : List[Any] = use_labels
A__ : Union[str, Any] = vocab_size
A__ : List[Any] = hidden_size
A__ : Any = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Any = hidden_act
A__ : Tuple = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Optional[int] = max_position_embeddings
A__ : Tuple = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[str] = initializer_range
A__ : Any = num_labels
A__ : Any = num_choices
A__ : int = scope
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = None
if self.use_input_mask:
A__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : int = None
A__ : int = None
A__ : List[str] = None
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case )
A__ : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = BioGptModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
# Define PAD Token = EOS Token = 50256
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
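
# A condensed, runnable sketch of the batched-generation setup the padding test
# above exercises (requires network access to download microsoft/biogpt; the
# pad/eos wiring mirrors that test):
def _demo_batched_generation():
    tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
    tokenizer.padding_side = "left"  # keep generated tokens right-aligned
    tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as PAD
    model.config.pad_token_id = model.config.eos_token_id
    inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
    outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)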
| 296
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = 0 ,lowercase = 0 ):
"""simple docstring"""
_UpperCAmelCase = right or len(lowercase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(lowercase ,lowercase ,left + 1 ,right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
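
# Worked example: searching [1, 2, 4, 5, 3] for 4 compares both ends on each
# call -- (1, 3), then (2, 5) -- and finds 4 at index 2 on the third call.
assert search([1, 2, 4, 5, 3], 4) == 2
assert search([1, 2, 4, 5, 3], 9) == -1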
| 289
|
"""simple docstring"""
import math
class a :
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ):
_UpperCAmelCase = 0.0
_UpperCAmelCase = 0.0
for i in range(len(__lowerCAmelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ):
for i in range(len(__lowerCAmelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def __UpperCAmelCase ( ):
"""simple docstring"""
# Training Examples ( m, n )
_UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCAmelCase = SelfOrganizingMap()
_UpperCAmelCase = 3
_UpperCAmelCase = 0.5
for _ in range(lowercase ):
for j in range(len(lowercase ) ):
# training sample
_UpperCAmelCase = training_samples[j]
# Compute the winning vector
_UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase )
# Update the winning vector
_UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase )
# classify test sample
_UpperCAmelCase = [0, 0, 0, 1]
_UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase )
# results
print(f'''Clusters that the test sample belongs to : {winner}''' )
print(f'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
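
# One update step worked out (a small check of the rule above): with alpha = 0.5
# each component of the winning row moves halfway toward the sample, e.g.
# 0.2 + 0.5 * (1 - 0.2) = 0.6.
def _demo_update_step():
    som = SelfOrganizingMap()
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    updated = som.update(weights, [1, 1, 0, 0], j=0, alpha=0.5)
    assert all(abs(a - b) < 1e-9 for a, b in zip(updated[0], [0.6, 0.8, 0.25, 0.45]))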
| 289
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
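
# The file above follows the lazy-import pattern: at runtime the module object
# is swapped for a _LazyModule that imports a submodule only when one of its
# attributes is first touched. A tiny self-contained sketch of the same
# deferred-import idea (illustrative only, not the _LazyModule internals):
def _lazy_attr(module_name: str, attr_name: str):
    import importlib

    module = importlib.import_module(module_name)  # imported only when called
    return getattr(module, attr_name)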
| 370
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def get_detr_config(model_name: str):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = 'huggingface/label-files'
        filename = 'coco-detection-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight')
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
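
# The slicing above splits torch's fused attention in-projection into separate
# q/k/v tensors: nn.MultiheadAttention stores one (3*d, d) weight, and with
# d = 256 rows [0:256), [256:512) and [512:768) hold the query, key and value
# projections respectively. A minimal check of that layout:
def _demo_qkv_split():
    in_proj_weight = torch.randn(3 * 256, 256)
    q, k, v = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]
    assert q.shape == k.shape == v.shape == (256, 256)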
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load default config and determine whether this is a panoptic checkpoint
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f'Converting model {model_name}...')
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...')
        model.push_to_hub(f'nielsr/{model_name}')
        processor.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
_lowerCamelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
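
# Example invocation (the output path is a placeholder and the script name is
# whatever this file is saved as):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted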
| 177
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.model'}
__UpperCamelCase : Optional[Any] = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
__UpperCamelCase : Optional[int] = {
'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return len(self.sp_model )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self : Optional[Any] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = d
SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __A ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.sp_model.EncodeAsPieces(UpperCamelCase__ )
return pieces
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
return self.sp_model.PieceToId(UpperCamelCase__ )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.sp_model.decode_pieces(UpperCamelCase__ )
return out_string
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def __A ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(UpperCamelCase__ ) )
return
SCREAMING_SNAKE_CASE : Tuple = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
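
# A self-contained sketch of the special-token layouts built above, with
# placeholder ids standing in for the real sentencepiece vocabulary: a pair of
# sequences becomes [CLS] A [SEP] B [SEP], with token_type_ids 0 for the first
# segment (including both leading special tokens) and 1 afterwards.
def _demo_special_tokens(cls_id=0, sep_id=1):
    ids_a, ids_b = [5, 6], [7]
    pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    assert len(pair) == len(token_type_ids) == 6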
| 182
|
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, '''IN_MEMORY_MAX_SIZE''', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
assert result == expected
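
# The expected value above reduces to one comparison; a minimal restatement of
# that branching for reference:
def _expected_is_small(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False  # unknown size or a disabled (zero) cap is never "small"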
| 182
| 1
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _lowerCamelCase( unittest.TestCase ):
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=30, lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=37, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=10, lowerCamelCase=0.0_2, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : List[Any] = batch_size
_lowercase : Any = image_size
_lowercase : str = patch_size
_lowercase : List[Any] = num_channels
_lowercase : Any = is_training
_lowercase : int = use_labels
_lowercase : str = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : str = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Union[str, Any] = type_sequence_label_size
_lowercase : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : List[Any] = (image_size // patch_size) ** 2
_lowercase : Any = num_patches + 1
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : Any = ViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, )
return config, pixel_values
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = FlaxViTModel(config=lowerCamelCase)
_lowercase : List[str] = model(lowerCamelCase)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_lowercase : Tuple = (self.image_size, self.image_size)
_lowercase : Optional[Any] = (self.patch_size, self.patch_size)
_lowercase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = self.type_sequence_label_size
_lowercase : List[Any] = FlaxViTForImageClassification(config=lowerCamelCase)
_lowercase : Union[str, Any] = model(lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowercase : List[Any] = 1
_lowercase : int = FlaxViTForImageClassification(lowerCamelCase)
_lowercase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowercase : str = model(lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCamelCase ( self) -> None:
"""simple docstring"""
_lowercase : int = FlaxViTModelTester(self)
_lowercase : List[Any] = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[Any] = model_class(lowerCamelCase)
_lowercase : int = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : str = [*signature.parameters.keys()]
_lowercase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowercase : Optional[Any] = self._prepare_for_class(lowerCamelCase, lowerCamelCase)
_lowercase : str = model_class(lowerCamelCase)
@jax.jit
def model_jitted(lowerCamelCase, **lowerCamelCase):
return model(pixel_values=lowerCamelCase, **lowerCamelCase)
with self.subTest('JIT Enabled'):
_lowercase : Optional[int] = model_jitted(**lowerCamelCase).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
_lowercase : Optional[Any] = model_jitted(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase))
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase):
self.assertEqual(jitted_output.shape, output.shape)
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowercase : Any = model_class_name.from_pretrained('google/vit-base-patch16-224')
_lowercase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24)))
self.assertIsNotNone(lowerCamelCase)
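
# The sequence-length bookkeeping the tester above relies on, spelled out:
# with the default image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225
# patches, plus one [CLS] token, giving a sequence length of 226.
def _vit_seq_length(image_size=30, patch_size=2):
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token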
| 84
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
SCREAMING_SNAKE_CASE : Optional[Any] = "sshleifer/mar_enro_6_3_student"
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> str:
"""simple docstring"""
super().setUp()
_lowercase : int = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz', extract_compressed_file=lowerCamelCase, )
_lowercase : Any = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(lowerCamelCase)
@slow
@require_torch_gpu
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowercase : Optional[int] = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py')[1].strip()
_lowercase : List[Any] = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
for k, v in env_vars_to_replace.items():
_lowercase : str = bash_script.replace(lowerCamelCase, str(lowerCamelCase))
_lowercase : Optional[Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowercase : Tuple = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowercase : int = ['finetune.py'] + bash_script.split() + args
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
_lowercase : Optional[int] = argparse.ArgumentParser()
_lowercase : str = pl.Trainer.add_argparse_args(lowerCamelCase)
_lowercase : List[str] = SummarizationModule.add_model_specific_args(lowerCamelCase, os.getcwd())
_lowercase : List[Any] = parser.parse_args()
_lowercase : Union[str, Any] = main(lowerCamelCase)
# Check metrics
_lowercase : Tuple = load_json(model.metrics_save_path)
_lowercase : Dict = metrics['val'][0]
_lowercase : int = metrics['val'][-1]
self.assertEqual(len(metrics['val']), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''], float)
        self.assertGreater(last_step_stats['val_avg_gen_time'], 0.01)
        # Guard against the model hanging on generate (which may indicate a bad saved config).
        self.assertLessEqual(last_step_stats['val_avg_gen_time'], 1.0)
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'], 2)
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'], 17)
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu']), 1.1)
# check lightning ckpt can be loaded and has a reasonable statedict
_lowercase : List[Any] = os.listdir(lowerCamelCase)
_lowercase : Optional[Any] = [x for x in contents if x.endswith('.ckpt')][0]
_lowercase : List[str] = os.path.join(args.output_dir, lowerCamelCase)
_lowercase : List[Any] = torch.load(lowerCamelCase, map_location='cpu')
_lowercase : str = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test']) == 1
class _lowerCamelCase( _a ):
@timeout_decorator.timeout(6_00)
@slow
@require_torch_gpu
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[Any] = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowercase : Optional[Any] = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 1_28,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowercase : Optional[int] = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py')[1].strip()
)
_lowercase : Any = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
_lowercase : List[str] = bash_script.replace('--fp16 ', ' ')
for k, v in env_vars_to_replace.items():
_lowercase : Optional[int] = bash_script.replace(lowerCamelCase, str(lowerCamelCase))
_lowercase : Any = self.get_auto_remove_tmp_dir()
_lowercase : str = bash_script.replace('--fp16', '')
_lowercase : Dict = 6
_lowercase : Tuple = (
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
_lowercase : Dict = argparse.ArgumentParser()
_lowercase : int = pl.Trainer.add_argparse_args(lowerCamelCase)
_lowercase : Tuple = SummarizationDistiller.add_model_specific_args(lowerCamelCase, os.getcwd())
_lowercase : Optional[int] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowercase : Tuple = distill_main(lowerCamelCase)
# Check metrics
_lowercase : Tuple = load_json(model.metrics_save_path)
_lowercase : Any = metrics['val'][0]
_lowercase : int = metrics['val'][-1]
assert len(metrics['val']) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''], float)
# check lightning ckpt can be loaded and has a reasonable statedict
_lowercase : List[str] = os.listdir(lowerCamelCase)
_lowercase : List[Any] = [x for x in contents if x.endswith('.ckpt')][0]
_lowercase : List[str] = os.path.join(args.output_dir, lowerCamelCase)
_lowercase : Tuple = torch.load(lowerCamelCase, map_location='cpu')
_lowercase : Dict = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test']) == 1
| 84
| 1
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f'{test_file} instead.')
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.')
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.')
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path
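# e.g. get_module_path('tests/models/bert/test_modeling_bert.py') returns
# 'tests.models.bert.test_modeling_bert' (assuming '/' is the OS path separator).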
def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 96
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308
| 0
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
    _backends = {1}
    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
def find_backend(line):
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('else:'):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
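# e.g. create_dummy_object('MODEL_MAPPING', '["torch"]') renders 'MODEL_MAPPING = None';
# a lowercase name renders a dummy function and a CamelCase name a DummyObject class.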
def create_dummy_files(backend_specific_objects=None):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f"\"{b}\"" for b in backend.split('_and_')) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, 'utils')
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"Updating diffusers.utils.dummy_{short_names.get(snake_case__ , snake_case__ )}_objects.py as the main "
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f"diffusers.utils.dummy_{short_names.get(snake_case__ , snake_case__ )}_objects.py. Run `make fix-copies` "
'to fix this.' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_UpperCamelCase = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 335
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BioGptTokenizer
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
UpperCAmelCase__ = dict(zip(__a , range(len(__a ) ) ) )
UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(__a ) )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = 'lower newer'
UpperCAmelCase__ = 'lower newer'
return input_text, output_text
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ = 'lower'
UpperCAmelCase__ = ['low', 'er</w>']
UpperCAmelCase__ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase__ = tokens + ['<unk>']
UpperCAmelCase__ = [14, 15, 20]
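        # In the toy vocab above: 'low' -> 14, 'er</w>' -> 15, and the appended '<unk>' -> 20.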
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
UpperCAmelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase__ = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 335
| 1
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
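        # e.g. with the defaults above: (16 - 2) // 2 + 1 = 8 frequency patches and
        # (24 - 2) // 2 + 1 = 12 time patches, so seq_length = 8 * 12 + 2 = 98.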
def A ( self : Dict ) -> int:
UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_values, labels
def A ( self : Any ) -> str:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def A ( self : int , __snake_case : int , __snake_case : Any , __snake_case : Optional[int] ) -> List[Any]:
UpperCAmelCase : List[str] = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Optional[int] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def A ( self : Optional[Any] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Any ) -> Tuple:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def A ( self : Tuple ) -> Tuple:
UpperCAmelCase : Optional[Any] = ASTModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def A ( self : Union[str, Any] ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def A ( self : Tuple ) -> Tuple:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def A ( self : str ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(lowerCamelCase__ )
UpperCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Dict = [*signature.parameters.keys()]
UpperCAmelCase : List[Any] = ['''input_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def A ( self : Any ) -> str:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def A ( self : Any ) -> Tuple:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : Optional[int] ) -> str:
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def A ( self : str ) -> List[Any]:
UpperCAmelCase : Optional[int] = self.default_feature_extractor
UpperCAmelCase : Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCamelCase__ )
UpperCAmelCase : List[str] = self.default_feature_extractor
        audio , sampling_rate = prepare_audio()
UpperCAmelCase : Tuple = audio.squeeze().numpy()
UpperCAmelCase : int = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**lowerCamelCase__ )
# verify the logits
UpperCAmelCase : Any = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCAmelCase : Union[str, Any] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
| 23
|
from pathlib import Path
import fire
def minify( src_dir , dest_dir , n ):
    '''simple docstring'''
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("""w""").write("""\n""".join(new))
if __name__ == "__main__":
fire.Fire(minify)
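# Usage sketch (assuming this script is saved as minify.py; fire exposes the
# function's positional args): python minify.py SRC_DIR DEST_DIR 100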
| 296
| 0
|
"""simple docstring"""
__version__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 312
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = '''ybelkada/fonts'''
def _check_torch_version() -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ['torch'])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, )
    return patches.unsqueeze(0)
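# Shape sanity check (illustrative values, not from the original file): a (1, 4, 6)
# CHW tensor with 2x3 patches yields shape (1, 2, 2, 6), i.e. a 2x2 grid of
# flattened 1*2*3 patches.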
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None, ) -> Image.Image:
    requires_backends(render_text, 'vision')
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = '\n'.join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, 'Arial.TTF')
    font = ImageFont.truetype(font, encoding='UTF-8', size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('RGB', (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('RGB', (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, 'vision')
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new('RGB', (new_width, new_height + new_header_height), 'white')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
        # maximize scale s.t. the resulting rows * columns of patches fits within max_patches
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
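        # Guard the denominator for near-constant images by falling back to 1/sqrt(N),
        # mirroring tf.image.per_image_standardization (an assumption about the intent).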
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
        if kwargs.get('data_format', None) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are always flattened patches.')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
| 312
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_git"""] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 95
|
"""simple docstring"""
def min_path_sum(grid) -> int:
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row, row_above) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
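# Example (hand-checked): min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7,
# following the path 1 -> 3 -> 1 -> 1 -> 1 with only right/down moves.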
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177
| 0
|
"""simple docstring"""
from collections.abc import Callable
class _A :
"""simple docstring"""
    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)
    def _parent(self, i: int):
        return int((i - 1) / 2) if i > 0 else None
    def _left(self, i: int):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None
    def _right(self, i: int):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None
    def _swap(self, i: int, j: int):
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp(self, i: int, j: int):
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent(self, i: int):
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent
    def _heapify_up(self, index: int):
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)
    def _heapify_down(self, index: int):
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(index)
    def update_item(self, item: int, item_value: int):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)
    def delete_item(self, item: int):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)
    def insert_item(self, item: int, item_value: int):
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)
    def get_top(self):
        return self.arr[0] if self.size else None
    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
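# Minimal usage sketch (hypothetical driver; with the default key this behaves as a
# max-heap on the item values):
# h = _A()
# h.insert_item('a', 3); h.insert_item('b', 1); h.insert_item('c', 2)
# assert h.get_top() == ['a', 3]
# h.update_item('a', 0)
# assert h.get_top() == ['c', 2]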
def lowercase ( )-> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowercase ( A_ , A_ , A_ , A_=5 )-> Union[str, Any]:
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a : List[str] = torch.tensor(tokenizer.encode(A_ , add_special_tokens=A_ ) ).unsqueeze(0 ) # Batch size 1
a : Dict = model(A_ )[0] # The last hidden-state is the first element of the output tuple
a : int = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a : Optional[Any] = logits[0, masked_index, :]
a : Dict = logits.softmax(dim=0 )
    values , indices = prob.topk(k=A_ , dim=0 )
a : Optional[Any] = " ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(A_ ) )] )
a : str = tokenizer.mask_token
a : Any = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
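        # "\u2581" is the SentencePiece word-boundary marker; replacing it with a space
        # recovers the plain-text token before substitution below.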
a : Dict = predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(A_ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(A_ ) , A_ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(A_ , A_ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
__lowercase = CamembertTokenizer.from_pretrained("""camembert-base""")
__lowercase = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
__lowercase = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 226
| 0
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 84
|
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    '''simple docstring'''
    lock1 = FileLock(str(tmpdir / """foo.lock"""))
    lock2 = FileLock(str(tmpdir / """foo.lock"""))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    '''simple docstring'''
    filename = """a""" * 1000 + """.lock"""
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(""".lock""")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 84
| 1
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32,
            attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32,
            attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu",
            class_embed_type="timestep", mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
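
# A hypothetical illustration (the concrete class and pipeline names are assumptions
# based on the usual diffusers test layout, not taken from this file): a test case
# wires the mixin to a pipeline class and reuses the shared save/load checks.
#
#   class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def get_dummy_components(self):
#           return self._get_dummy_components()
#
#       def test_save_load_local(self):
#           self._test_save_load_local()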
| 356
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # The dataclass is frozen, so update the copied template via __dict__
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
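
# Hedged usage sketch (the dataset features below are invented): align_with_features
# copies the template and swaps in the dataset's own ClassLabel for the label column.
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification().align_with_features(features)
#   assert task.label_schema["labels"].names == ["neg", "pos"]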
| 333
| 0
|
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
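
# Typical invocations, run from the repository root as noted at the top of this file:
#
#   python utils/check_dummies.py                      # fail if dummy files are stale
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate them in place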
| 335
|
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
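
# For the adjacency list above, removing vertex 2, 3 or 5 disconnects the graph,
# so compute_ap(data) prints the articulation points 2, 3 and 5 (one per line).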
| 335
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
lowerCAmelCase = None
lowerCAmelCase = 2_0
lowerCAmelCase = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase__ )
# tweak scores to not be uniform anymore
lowerCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCAmelCase = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCAmelCase = jax.nn.softmax(UpperCAmelCase__ , axis=-1 )
lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 )
lowerCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self):
lowerCAmelCase = None
lowerCAmelCase = 1_0
lowerCAmelCase = 2
# create ramp distribution
lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
lowerCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCAmelCase = FlaxTopKLogitsWarper(3 )
lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCAmelCase = 5
lowerCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, length) ).copy()
lowerCAmelCase = top_k_warp_safety_check(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper(self):
lowerCAmelCase = None
lowerCAmelCase = 1_0
lowerCAmelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
lowerCAmelCase = np.exp(top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCAmelCase = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_dist_processor(self):
lowerCAmelCase = 2_0
lowerCAmelCase = 4
lowerCAmelCase = 0
lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ )
# check that min length is applied at length 5
lowerCAmelCase = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
lowerCAmelCase = 5
lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = 1_5
lowerCAmelCase = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
    def test_forced_bos_token_logits_processor(self):
lowerCAmelCase = 2_0
lowerCAmelCase = 4
lowerCAmelCase = 0
lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
# check that all scores are -inf except the bos_token_id score
lowerCAmelCase = ids_tensor((batch_size, 1) , vocab_size=2_0 )
lowerCAmelCase = 1
lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCAmelCase = 3
lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
    def test_forced_eos_token_logits_processor(self):
lowerCAmelCase = 2_0
lowerCAmelCase = 4
lowerCAmelCase = 0
lowerCAmelCase = 5
lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCAmelCase = ids_tensor((batch_size, 4) , vocab_size=2_0 )
lowerCAmelCase = 4
lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCAmelCase = 3
lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
    def test_processor_list(self):
lowerCAmelCase = 4
lowerCAmelCase = 1_0
lowerCAmelCase = 1_5
lowerCAmelCase = 2
lowerCAmelCase = 1
lowerCAmelCase = 1_5
# dummy input_ids and scores
lowerCAmelCase = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ )
lowerCAmelCase = input_ids.copy()
lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = scores.copy()
# instantiate all dist processors
lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase = FlaxTopKLogitsWarper(3 )
lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ )
lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
lowerCAmelCase = 1_0
# no processor list
lowerCAmelCase = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# with processor list
lowerCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted(self):
lowerCAmelCase = 4
lowerCAmelCase = 1_0
lowerCAmelCase = 1_5
lowerCAmelCase = 2
lowerCAmelCase = 1
lowerCAmelCase = 1_5
# dummy input_ids and scores
lowerCAmelCase = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ )
lowerCAmelCase = input_ids.copy()
lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = scores.copy()
# instantiate all dist processors
lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase = FlaxTopKLogitsWarper(3 )
lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ )
lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
lowerCAmelCase = 1_0
# no processor list
def run_no_processor_list(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ):
lowerCAmelCase = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
lowerCAmelCase = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
return scores
# with processor list
def run_processor_list(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ):
lowerCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
return scores
lowerCAmelCase = jax.jit(UpperCAmelCase__ )
lowerCAmelCase = jax.jit(UpperCAmelCase__ )
lowerCAmelCase = jitted_run_no_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = jitted_run_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
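
# A minimal standalone sketch (values invented) of chaining warpers the way the tests
# above do: every processor maps (input_ids, scores, cur_len) -> scores.
#
#   import jax.numpy as jnp
#   from transformers.generation import (
#       FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper,
#   )
#
#   processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)])
#   input_ids = jnp.zeros((1, 4), dtype="i4")
#   scores = jnp.log(jnp.array([[0.1, 0.2, 0.3, 0.4]]))
#   warped = processors(input_ids, scores, cur_len=4)  # all but the top-2 logits become -inf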
| 358
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 55
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
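
# Hedged usage sketch (the sizes below are chosen arbitrarily): inspect the ONNX
# input axes that CodeGenOnnxConfig declares, with cached past key values enabled.
#
#   cfg = CodeGenConfig(n_layer=2, n_head=4, n_embd=128)
#   onnx_cfg = CodeGenOnnxConfig(cfg, use_past=True)
#   print(onnx_cfg.inputs)      # input_ids + past_key_values.* + attention_mask axes
#   print(onnx_cfg.num_layers)  # 2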
| 312
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
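
# Hedged usage sketch (the checkpoint name is the public OwlViT base checkpoint; the
# image path and query texts are illustrative):
#
#   from transformers import OwlViTProcessor
#   from PIL import Image
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.open("cats.png")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   # inputs now holds input_ids, attention_mask and pixel_values for the detection model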
| 312
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
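
# Note on the design: with this _LazyModule pattern, importing the package is cheap;
# the framework-specific submodules listed in _import_structure are only imported on
# first attribute access (e.g. when `VisionEncoderDecoderModel` is actually touched).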
| 366
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
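
# Behavior sketch: when torch or scipy is missing, any use of the placeholder fails
# with an informative error instead of failing at import time, e.g.
#
#   LMSDiscreteScheduler()                 # raises an error via requires_backends
#   LMSDiscreteScheduler.from_config(cfg)  # same; `cfg` is a hypothetical config object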
| 9
| 0
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Takes an LDM state dict and a config, and returns a converted checkpoint."""
UpperCAmelCase = {}
UpperCAmelCase = checkpoint['''time_embed.0.weight''']
UpperCAmelCase = checkpoint['''time_embed.0.bias''']
UpperCAmelCase = checkpoint['''time_embed.2.weight''']
UpperCAmelCase = checkpoint['''time_embed.2.bias''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase = checkpoint['''out.0.weight''']
UpperCAmelCase = checkpoint['''out.0.bias''']
UpperCAmelCase = checkpoint['''out.2.weight''']
UpperCAmelCase = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(_UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(_UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(_UpperCAmelCase )
}
for i in range(1 , _UpperCAmelCase ):
UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
UpperCAmelCase = {'''old''': F"""input_blocks.{i}.0""", '''new''': F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=_UpperCAmelCase )
if len(_UpperCAmelCase ):
UpperCAmelCase = renew_attention_paths(_UpperCAmelCase )
UpperCAmelCase = {
'''old''': F"""input_blocks.{i}.1""",
'''new''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
F"""input_blocks.{i}.1.qkv.bias""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=_UpperCAmelCase , config=_UpperCAmelCase , )
UpperCAmelCase = middle_blocks[0]
UpperCAmelCase = middle_blocks[1]
UpperCAmelCase = middle_blocks[2]
UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , config=_UpperCAmelCase )
UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , config=_UpperCAmelCase )
UpperCAmelCase = renew_attention_paths(_UpperCAmelCase )
UpperCAmelCase = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , attention_paths_to_split=_UpperCAmelCase , config=_UpperCAmelCase )
for i in range(_UpperCAmelCase ):
UpperCAmelCase = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [shave_segments(_UpperCAmelCase , 2 ) for name in output_blocks[i]]
UpperCAmelCase = {}
for layer in output_block_layers:
UpperCAmelCase = layer.split('''.''' )[0], shave_segments(_UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_UpperCAmelCase )
else:
UpperCAmelCase = [layer_name]
if len(_UpperCAmelCase ) > 1:
UpperCAmelCase = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase )
UpperCAmelCase = {'''old''': F"""output_blocks.{i}.0""", '''new''': F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , config=_UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCAmelCase = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(_UpperCAmelCase ) == 2:
UpperCAmelCase = []
if len(_UpperCAmelCase ):
UpperCAmelCase = renew_attention_paths(_UpperCAmelCase )
UpperCAmelCase = {
'''old''': F"""output_blocks.{i}.1""",
'''new''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
F"""output_blocks.{i}.1.qkv.bias""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=_UpperCAmelCase , )
else:
UpperCAmelCase = renew_resnet_paths(_UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase = '''.'''.join(['''output_blocks''', str(_UpperCAmelCase ), path['''old''']] )
UpperCAmelCase = '''.'''.join(['''up_blocks''', str(_UpperCAmelCase ), '''resnets''', str(_UpperCAmelCase ), path['''new''']] )
UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
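
# Example invocation (the script filename and paths are placeholders, not from the source):
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./model.ckpt --config_file ./config.json --dump_path ./converted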
| 273
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
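# A minimal sketch (not part of the original script) of how the manual truncation in
# `training_function` collapses into a single call with `Accelerator.gather_for_metrics`,
# which drops the duplicated samples the distributed samplers append to pad the last batch:
#
#     predictions, references = accelerator.gather_for_metrics(
#         (outputs.logits.argmax(dim=-1), batch["labels"])
#     )
#     metric.add_batch(predictions=predictions, references=references)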
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
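# Worked example for the formula above, using the docstring's pairs: the first pair
# ("this is the prediction" vs "this is the reference") has S=1, D=0, I=0 against N=4
# reference words; the second pair contributes S=2, I=1 against N=4. Overall:
# (1 + 3) / (4 + 4) = 0.5, matching the docstring output.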
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
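# Minimal usage sketch (the defaults above reproduce google/pegasus-large):
#
#     config = PegasusConfig()
#     assert config.hidden_size == 1024 and config.num_attention_heads == 16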
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # SwiGLU sizing: take 2/3 of 4*n (i.e. 8n/3), then round up to the nearest multiple of `multiple_of`.
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
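# Worked example (7B: n = 4096, multiplier = 1, multiple_of = 256):
#     int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 -> 11008,
# which matches INTERMEDIATE_SIZE_MAP["7B"] above.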
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model, preferring the fast version when available.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
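# Example invocation (paths are illustrative, not from this repo):
#
#     python convert_llama_weights_to_hf.py \
#         --input_dir /path/to/downloaded/llama --model_size 7B --output_dir /path/to/llama-hf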
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """
    Returns True if and only if 2^p - 1 is a Mersenne prime (p itself should be prime
    for the test to be meaningful).
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2^p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
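# Sanity check for the two calls above: 2^7 - 1 = 127 is prime, so lucas_lehmer_test(7)
# is True; 2^11 - 1 = 2047 = 23 * 89 is composite, so lucas_lehmer_test(11) is False.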
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
A__ : List[str] =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Optional[int] , __snake_case : Optional[int] , __snake_case : Any="<s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Any="</s>" , __snake_case : Union[str, Any]="<s>" , __snake_case : Dict="<unk>" , __snake_case : Tuple="<pad>" , __snake_case : Any="<mask>" , __snake_case : Union[str, Any]=None , __snake_case : Any=None , __snake_case : List[str]=None , __snake_case : List[Any] = None , __snake_case : Optional[int]=None , __snake_case : Dict=False , **__snake_case : Tuple , ) -> Optional[int]:
_lowerCAmelCase = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase = legacy_behaviour
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , tokenizer_file=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__snake_case , **__snake_case , )
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
_lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase = 1
_lowerCAmelCase = len(self.sp_model )
_lowerCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case )
}
_lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn"""
_lowerCAmelCase = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def lowercase__ ( self : Tuple ) -> List[str]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
return self._src_lang
@src_lang.setter
def lowercase__ ( self : List[Any] , __snake_case : List[Any] ) -> List[Any]:
_lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] = None , __snake_case : int = False ) -> Dict:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
_lowerCAmelCase = [1] * len(self.prefix_tokens )
_lowerCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def lowercase__ ( self : Tuple , __snake_case : str , __snake_case : Tuple = None ) -> List[str]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : Optional[Any] , __snake_case : str , __snake_case : List[Any] = None ) -> Union[str, Any]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : List[str] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , **__snake_case : List[str] ) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase = src_lang
_lowerCAmelCase = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
_lowerCAmelCase = self.convert_tokens_to_ids(__snake_case )
_lowerCAmelCase = tgt_lang_id
return inputs
def lowercase__ ( self : Dict ) -> Dict:
_lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : Dict , __snake_case : Optional[Any] ) -> List[str]:
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def lowercase__ ( self : Dict , __snake_case : Any ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase = self.sp_model.PieceToId(__snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : Union[str, Any] , __snake_case : int ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def lowercase__ ( self : Tuple , __snake_case : Optional[int] , __snake_case : List[str] = None ) -> int:
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
def lowercase__ ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : int = "eng_Latn" , __snake_case : Union[str, Any] = None , __snake_case : Dict = "fra_Latn" , **__snake_case : Any , ) -> Dict:
_lowerCAmelCase = src_lang
_lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : Tuple ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
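# Usage sketch (the class above corresponds to transformers' NllbTokenizer; the
# checkpoint name is the one published on the Hub):
#
#     tokenizer = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello world", return_tensors="pt")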
'''simple docstring'''
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum from the top left to the bottom right of the grid in
    `filename`, moving only right and down (Project Euler problem 81).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]

    # The first row and first column can each only be reached one way.
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    # Every other cell is reached from the cheaper of its top/left neighbour.
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
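# Worked example on a 3x3 grid (illustrative, not from matrix.txt):
#     1 2 3
#     4 5 6
#     7 8 9
# the cheapest right/down path is 1 -> 2 -> 3 -> 6 -> 9, with sum 21.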
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
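# Worked example: encode("abc") -> [1, 2, 3] and decode([1, 2, 3]) -> "abc"
# (letters map to their 1-based position in the alphabet via ord(c) - 96).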
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using the union-by-rank heuristic.
        Return True if successful, False if the sets are already merged.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
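# Usage sketch:
#
#     ds = DisjointSet([1, 1, 1])   # three singleton sets
#     ds.merge(0, 1)                # -> True, ds.max_set becomes 2
#     ds.merge(0, 1)                # -> False, already in the same set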
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4096,
'allenai/longformer-large-4096': 4096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding the
    whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of variable-length string symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
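# Worked example:
#     get_pairs(("h", "e", "l", "l", "o")) -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}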
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = '''ylacombe/bark-small'''
__SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : str = '''en_speaker_1'''
__SCREAMING_SNAKE_CASE : Any = '''This is a test string'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings_path.json'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings'''
def __magic_name__( self :List[str] , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> int:
shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
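# Hedged usage sketch (not part of the test file): how BarkProcessor is typically
# used outside tests. Assumes network access to the checkpoint used above; wrapped
# in a function so the module stays import-safe.
def demo_bark_processor():
    from transformers import BarkProcessor

    processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    # voice_preset may be a preset name, a dict of numpy arrays, or a path to an .npz file
    inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
    return inputs  # contains "input_ids", "attention_mask" and "history_prompt"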
| 9
| 0
|
"""Skip list: a probabilistic sorted map with expected O(log n) search, insert and delete."""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)

class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p  # probability of promoting a node one level up
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Flip coins until one fails; each success promotes the node one level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None

def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19

def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10

def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13

def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None

def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None

def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4

def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))

def pytests() -> None:
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main() -> None:
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
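# Hedged add-on (not part of the original module): a tiny sanity demo showing that
# the structure behaves like a sorted map. Runnable once the classes above are defined.
def demo_sorted_map():
    sl = SkipList()
    for k in (30, 10, 20):
        sl.insert(k, str(k))
    assert list(sl) == [10, 20, 30]  # iteration yields keys in sorted order
    assert sl.find(20) == "20"
    sl.delete(20)
    assert sl.find(20) is None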
| 67
|
"""MRA model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5,
        position_embedding_type="absolute", block_per_row=4, approx_mode="full",
        initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
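# Hedged usage sketch (not part of the configuration file): constructing and
# round-tripping a config. The directory name is illustrative.
def demo_mra_config():
    config = MraConfig(num_hidden_layers=6, block_per_row=2)  # override a few defaults
    config.save_pretrained("./mra-small")  # writes config.json
    return MraConfig.from_pretrained("./mra-small")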
| 67
| 1
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BigBird models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>",
        sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, mask_token=mask_token, cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # SentencePieceProcessor is not picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with the SentencePiece model."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            return self.clean_up_tokenization(text)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # fall back to serializing the in-memory SentencePiece model
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Single sequence: [CLS] X [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
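# Hedged usage sketch (not part of the tokenizer file): encoding a sentence pair.
# Assumes a local SentencePiece model file; the path is illustrative.
def demo_bigbird_tokenizer():
    tok = BigBirdTokenizer("spiece.model")  # hypothetical local vocab file
    enc = tok("Question text?", "Context paragraph.", return_token_type_ids=True)
    # input_ids follow the [CLS] A [SEP] B [SEP] layout built above;
    # token_type_ids mark the second segment with 1s.
    return enc["input_ids"], enc["token_type_ids"]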
| 124
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    """Wraps a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
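# Hedged usage sketch (not part of the processor file): joint text+image preprocessing.
# The checkpoint name is assumed to be the public FLAVA checkpoint; availability is assumed.
def demo_flava_processor():
    from PIL import Image
    from transformers import FlavaProcessor

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    image = Image.new("RGB", (224, 224))  # placeholder image
    batch = processor(images=image, text="a photo", return_tensors="pt")
    return batch  # tokenizer fields plus pixel_values from the image processor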
| 124
| 1
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCAmelCase ( lowerCamelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = """efficientformer"""
def __init__( self , lowerCAmelCase__ = [3, 2, 6, 4] , lowerCAmelCase__ = [48, 96, 224, 448] , lowerCAmelCase__ = [True, True, True, True] , lowerCAmelCase__ = 448 , lowerCAmelCase__ = 32 , lowerCAmelCase__ = 4 , lowerCAmelCase__ = 7 , lowerCAmelCase__ = 5 , lowerCAmelCase__ = 8 , lowerCAmelCase__ = 4 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 16 , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = 1e-5 , lowerCAmelCase__ = "gelu" , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 1e-12 , lowerCAmelCase__ = 224 , lowerCAmelCase__ = 1e-05 , **lowerCAmelCase__ , ) -> Optional[Any]:
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_expansion_ratio
SCREAMING_SNAKE_CASE = downsamples
SCREAMING_SNAKE_CASE = dim
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = resolution
SCREAMING_SNAKE_CASE = pool_size
SCREAMING_SNAKE_CASE = downsample_patch_size
SCREAMING_SNAKE_CASE = downsample_stride
SCREAMING_SNAKE_CASE = downsample_pad
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = num_metaad_blocks
SCREAMING_SNAKE_CASE = distillation
SCREAMING_SNAKE_CASE = use_layer_scale
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = batch_norm_eps
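# Hedged sanity sketch (not part of the configuration file): the per-stage lists
# above are expected to agree in length, one entry per stage.
def demo_efficientformer_config():
    config = EfficientFormerConfig()
    assert len(config.depths) == len(config.hidden_sizes) == len(config.downsamples)
    return config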
| 357
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
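# Hedged illustration (not part of the MVP module): a minimal standalone version of
# the lazy-module idea used above — attributes resolve to submodule imports on first
# access, so heavy dependencies load only when actually needed. Simplified sketch,
# not the actual transformers _LazyModule.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        # import the owning submodule on demand and fetch the attribute from it
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)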
| 38
| 0
|