| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from standard British coins (dynamic programming)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # each coin extends counts from `coin` upwards, so combinations are not double-counted
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
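# Illustrative check (not part of the original file): for 5 pence only the
# 1p, 2p and 5p coins fit, giving 4 combinations, which the DP reproduces:
#     solution(5)  # -> 4  (5; 2+2+1; 2+1+1+1; 1+1+1+1+1)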
if __name__ == "__main__":
    assert solution(200) == 73682
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
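# Sketch of the effect (package path illustrative): with the lazy structure
# above, `from transformers.models.vivit import VivitModel` resolves the name
# through _LazyModule and only imports modeling_vivit on first attribute access.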
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # fix: the original wrote `if "fc2" and ...`, which is always truthy;
        # the intended membership test is on the key itself
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
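# Example invocation (script name and paths illustrative, not from the original file):
#   python convert_nllb_moe_checkpoint.py \
#       --nllb_moe_checkpoint_path /path/to/checkpoint_2_300000 \
#       --pytorch_dump_folder_path /path/to/hf-converted-moe --dtype float32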
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for one split and save them to `{split}_results.json`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
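# Example launch (flags illustrative; the full set comes from
# Seq2SeqTrainingArguments and the dataclasses above):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir $DATA \
#       --output_dir $OUT --do_train --do_eval --predict_with_generate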
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
        and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
        Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
        Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
        Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
        and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2 (multiple references for the first hypothesis):
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3 (same inputs as Example 2, restricted to n-grams of order >= 2):
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4 (same inputs as Example 2, with n-grams of orders 2 through 6):
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between v0 and v1 at fraction t."""
    inputs_are_torch = False  # fix: was left undefined when inputs are numpy arrays
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel; fall back to plain lerp
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
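# Illustrative check of slerp (not from the original file): interpolating two
# orthogonal unit vectors halfway stays on the unit sphere, unlike a linear mix.
#   import numpy as np
#   v_mid = slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
#   # v_mid ~= [0.7071, 0.7071]; np.linalg.norm(v_mid) ~= 1.0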
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    # Descriptive class name restored from context (content/style image mixing
    # with CLIP guidance); the obfuscated source did not preserve the original name.
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def _lowercase( self , A = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def _lowercase( self ) -> Optional[Any]:
self.enable_attention_slicing(A )
def _lowercase( self ) -> List[str]:
set_requires_grad(self.vae , A )
def _lowercase( self ) -> str:
set_requires_grad(self.vae , A )
def _lowercase( self ) -> Tuple:
set_requires_grad(self.unet , A )
def _lowercase( self ) -> Optional[int]:
set_requires_grad(self.unet , A )
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
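# Usage sketch (checkpoint ids illustrative, not from the original file): the
# pipeline mixes a content image and a style image by slerping their prompt
# embeddings, VAE latents and CLIP image embeddings, then denoising under CLIP
# guidance; it is constructed from a Stable Diffusion checkpoint plus a CLIP
# model and called with both images and the three slerp strengths above.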
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Sequentially scan array[left:right] for target; return its index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted array; returns the index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted array; returns the index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
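# Quick sanity check (illustrative, not part of the original module):
#   >>> ite_ternary_search([1, 3, 5, 7, 9], 7)
#   3
#   >>> rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 7)
#   3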
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position: {result_ite}")
        print(f"Recursive search: {target} found at position: {result_rec}")
    else:
        print("Not found")
"""simple docstring"""
lowerCamelCase_ : List[Any] = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
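# The guarded-import pattern used throughout this module generalizes to any
# optional backend. A minimal sketch (the `is_foo_available` check and
# `FooPipeline` are hypothetical names, not part of diffusers):
#
#     try:
#         if not is_foo_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_foo_objects import *  # noqa F403
#     else:
#         from .pipelines import FooPipeline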
| 81
|
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """A graph whose outgoing edge weights are transition probabilities."""
    def __init__(self):
        self.connections = {}
    def add_node(self, node: str) -> None:
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self) -> list[str]:
        return list(self.connections)
    def transition(self, node: str) -> str:
        # Sample the next node: walk the cumulative distribution of the
        # outgoing edges until it exceeds a uniform random draw.
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run a random walk of `steps` transitions and count how often each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
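    # Minimal demo (hypothetical two-state weather chain, not part of the
    # original file): over many steps the visit counts approximate the chain's
    # stationary distribution, here roughly 5:1 in favour of "sunny".
    demo_transitions = [
        ("sunny", "sunny", 0.9),
        ("sunny", "rainy", 0.1),
        ("rainy", "sunny", 0.5),
        ("rainy", "rainy", 0.5),
    ]
    print(get_transitions("sunny", demo_transitions, 10_000))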
| 93
| 0
|
'''simple docstring'''
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
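    # Quick sanity check (values read straight off the table above):
    # 1 kilowatthour is 3_600_000 J, so expressed in kilojoule it is 3_600.
    print(energy_conversion("kilowatthour", "kilojoule", 1))  # 3600.0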
| 4
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
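    # A quick check of the update rule (not in the original script): the
    # blinker is a period-2 oscillator, so two generations restore it.
    assert new_generation(new_generation(BLINKER)) == BLINKER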
| 4
| 1
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase : int = logging.get_logger(__name__)
lowercase : List[str] = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __snake_case ( __snake_case ):
_a : List[str]= "bart"
_a : Optional[Any]= ["past_key_values"]
_a : Any= {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self ,snake_case=50265 ,snake_case=1024 ,snake_case=12 ,snake_case=4096 ,snake_case=16 ,snake_case=12 ,snake_case=4096 ,snake_case=16 ,snake_case=0.0 ,snake_case=0.0 ,snake_case="gelu" ,snake_case=1024 ,snake_case=0.1 ,snake_case=0.0 ,snake_case=0.0 ,snake_case=0.02 ,snake_case=0.0 ,snake_case=False ,snake_case=True ,snake_case=3 ,snake_case=1 ,snake_case=0 ,snake_case=2 ,snake_case=True ,snake_case=2 ,snake_case=2 ,**snake_case ,):
'''simple docstring'''
lowercase : Dict = vocab_size
lowercase : Any = max_position_embeddings
lowercase : Union[str, Any] = d_model
lowercase : str = encoder_ffn_dim
lowercase : Dict = encoder_layers
lowercase : str = encoder_attention_heads
lowercase : Union[str, Any] = decoder_ffn_dim
lowercase : Optional[Any] = decoder_layers
lowercase : List[Any] = decoder_attention_heads
lowercase : Optional[int] = dropout
lowercase : Optional[int] = attention_dropout
lowercase : Optional[int] = activation_dropout
lowercase : Dict = activation_function
lowercase : List[str] = init_std
lowercase : Any = encoder_layerdrop
lowercase : List[Any] = decoder_layerdrop
lowercase : List[str] = classifier_dropout
lowercase : Any = use_cache
lowercase : Optional[Any] = encoder_layers
lowercase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=snake_case ,pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,is_encoder_decoder=snake_case ,decoder_start_token_id=snake_case ,forced_eos_token_id=snake_case ,**snake_case ,)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" ,snake_case ):
lowercase : Union[str, Any] = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"""The config can simply be saved and uploaded again to be fixed.""" )
class __snake_case ( __snake_case ):
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase : Optional[int] = {0: '''batch'''}
lowercase : Optional[int] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase : str = {0: '''batch''', 1: '''decoder_sequence'''}
lowercase : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case ,direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase : Tuple = self.num_layers
for i in range(snake_case ):
lowercase : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowercase : Tuple = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase : str = super().outputs
else:
lowercase : Tuple = super(snake_case ,self ).outputs
if self.use_past:
lowercase : Any = self.num_layers
for i in range(snake_case ):
lowercase : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = -1 ,snake_case = -1 ,snake_case = False ,snake_case = None ,):
'''simple docstring'''
lowercase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case ,snake_case ,snake_case ,snake_case ,snake_case )
# Generate decoder inputs
lowercase : Union[str, Any] = seq_length if not self.use_past else 1
lowercase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case ,snake_case ,snake_case ,snake_case ,snake_case )
lowercase : Union[str, Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowercase : Union[str, Any] = dict(**snake_case ,**snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase : Union[str, Any] = common_inputs['''input_ids'''].shape
lowercase : Optional[Any] = common_inputs['''decoder_input_ids'''].shape[1]
lowercase : str = self.num_attention_heads
lowercase : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase : Tuple = decoder_seq_length + 3
lowercase : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase : Union[str, Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(snake_case ,snake_case )] ,dim=1 )
lowercase : Tuple = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase : Tuple = self.num_layers
lowercase : Union[str, Any] = min(snake_case ,snake_case )
lowercase : Optional[Any] = max(snake_case ,snake_case ) - min_num_layers
lowercase : Any = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
) )
# TODO: test this.
lowercase : List[str] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(snake_case ,snake_case ):
common_inputs["past_key_values"].append((torch.zeros(snake_case ), torch.zeros(snake_case )) )
return common_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = -1 ,snake_case = -1 ,snake_case = False ,snake_case = None ,):
'''simple docstring'''
lowercase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case ,snake_case ,snake_case ,snake_case ,snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase : Optional[int] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase : List[Any] = seqlen + 2
lowercase : List[Any] = self.num_layers
lowercase : Optional[Any] = self.num_attention_heads
lowercase : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase : Union[str, Any] = common_inputs['''attention_mask'''].dtype
lowercase : List[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(snake_case ,snake_case ,dtype=snake_case )] ,dim=1 )
lowercase : Tuple = [
(torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(snake_case )
]
return common_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = -1 ,snake_case = -1 ,snake_case = False ,snake_case = None ,):
'''simple docstring'''
lowercase : Tuple = compute_effective_axis_dimension(
snake_case ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : int = tokenizer.num_special_tokens_to_add(snake_case )
lowercase : int = compute_effective_axis_dimension(
snake_case ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=snake_case )
# Generate dummy inputs according to compute batch and sequence
lowercase : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase : List[Any] = dict(tokenizer(snake_case ,return_tensors=snake_case ) )
return common_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = -1 ,snake_case = -1 ,snake_case = False ,snake_case = None ,):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case ,batch_size=snake_case ,seq_length=snake_case ,is_pair=snake_case ,framework=snake_case )
elif self.task == "causal-lm":
lowercase : int = self._generate_dummy_inputs_for_causal_lm(
snake_case ,batch_size=snake_case ,seq_length=snake_case ,is_pair=snake_case ,framework=snake_case )
else:
lowercase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case ,batch_size=snake_case ,seq_length=snake_case ,is_pair=snake_case ,framework=snake_case )
return common_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase : List[str] = super()._flatten_past_key_values_(snake_case ,snake_case ,snake_case ,snake_case )
else:
lowercase : Optional[Any] = super(snake_case ,self )._flatten_past_key_values_(
snake_case ,snake_case ,snake_case ,snake_case )
| 20
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    '169M': 12,
    '430M': 24,
    '1B5': 24,
    '3B': 32,
    '7B': 32,
    '14B': 40,
}
HIDDEN_SIZE_MAPPING = {
    '169M': 768,
    '430M': 1024,
    '1B5': 2048,
    '3B': 2560,
    '7B': 4096,
    '14B': 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att', R'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn', R'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')
        if name != 'head.weight':
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.')
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)
        # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
        print(
            "Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model."
        )
        shard_files = list(shards.keys())
        del state_dict
        del shards
        gc.collect()
        for shard_file in shard_files:
            state_dict = torch.load(os.path.join(output_dir, shard_file))
            torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
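# Example invocation (repo, file, and directory names are illustrative only):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-hf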
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
        help='Path to the tokenizer file to use (if not provided, the default GPT-NeoX-20B tokenizer is used).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 3
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """Register the shared custom pytest options (e.g. `--make-reports`)."""
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Emit per-run test reports when `--make-reports=<id>` is passed."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
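# With these hooks in place, report files can be requested per run, e.g.
# (illustrative): pytest tests/schedulers --make-reports=tests_schedulers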
| 185
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __a ( UpperCAmelCase ):
_a : Optional[int] = 'MCTCTFeatureExtractor'
_a : int = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.feature_extractor
_UpperCAmelCase = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
_UpperCAmelCase = kwargs.pop('raw_speech' )
else:
_UpperCAmelCase = kwargs.pop('audio' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = kwargs.pop('sampling_rate' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
_UpperCAmelCase = self.feature_extractor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
_UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_UpperCAmelCase = encodings['input_ids']
return inputs
def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = kwargs.pop('input_features' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = kwargs.pop('labels' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if input_features is not None:
_UpperCAmelCase = self.feature_extractor.pad(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if labels is not None:
_UpperCAmelCase = self.tokenizer.pad(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_UpperCAmelCase = labels['input_ids']
return input_features
def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.feature_extractor
_UpperCAmelCase = False
| 185
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = (UnCLIPScheduler,)
def lowercase_ ( self : List[str] , **__lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCamelCase )
return config
def lowercase_ ( self : Dict ) -> Any:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowercase_ ( self : str ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowercase_ ( self : int ) -> str:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1712790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__lowerCamelCase ) - -5.7998052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__lowerCamelCase ) - -0.0010011 < 1e-5
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE__ = None
else:
SCREAMING_SNAKE_CASE__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def lowercase_ ( self : int ) -> Tuple:
pass
def lowercase_ ( self : Dict ) -> Union[str, Any]:
pass
| 314
| 1
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    # Branch on excluding, then including, sequence[index]; a complete
    # assignment (index == len(sequence)) is one subsequence.
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
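# Expected behaviour (follows from the exclude-then-include recursion above):
# each call prints every subsequence exactly once, 2**n lines in total,
# starting with [] and ending with the full sequence.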
| 352
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split('_')[0].split('block')[1] for v in original_param_names if v.startswith('block')]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]
    # Map the classification head separately (it is not part of the encoder)
    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights='imagenet',
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation='softmax',
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...')
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors='pt')
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print('Model outputs match!')
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f'Pushing converted {model_name} to the hub...')
        model_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
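# Example invocation (illustrative only):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model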
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 84
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case_ (UpperCamelCase_ ):
UpperCAmelCase__ : Dict = ["""image_processor""", """tokenizer"""]
UpperCAmelCase__ : List[str] = """CLIPImageProcessor"""
UpperCAmelCase__ : List[str] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self :Tuple ,__snake_case :Union[str, Any]=None ,__snake_case :Dict=None ,**__snake_case :int ) -> Optional[int]:
a__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,__SCREAMING_SNAKE_CASE ,)
a__ = kwargs.pop('feature_extractor' )
a__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def __call__( self :int ,__snake_case :Tuple=None ,__snake_case :int=None ,__snake_case :Optional[Any]=None ,**__snake_case :List[Any] ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
a__ = self.tokenizer(__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if images is not None:
a__ = self.image_processor(__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
a__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) ,tensor_type=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :str ,*__snake_case :int ,**__snake_case :List[str] ) -> str:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :Tuple ,*__snake_case :Optional[Any] ,**__snake_case :str ) -> List[str]:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
@property
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = self.tokenizer.model_input_names
a__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,__SCREAMING_SNAKE_CASE ,)
return self.image_processor_class
@property
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,__SCREAMING_SNAKE_CASE ,)
return self.image_processor
| 240
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OWL-ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
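# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assuming the public hub checkpoint "google/owlvit-base-patch32" and a local
# PIL image, the processor above is typically driven like this; the nested list
# of strings exercises the max_num_queries padding branch:
#
#     from transformers import OwlViTProcessor
#     from PIL import Image
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     image = Image.new("RGB", (768, 768))
#     inputs = processor(text=[["a cat", "a dog", "a bird"]], images=image, return_tensors="pt")
#     print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']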
| 338
| 0
|
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
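    # Sanity check (well-known Project Euler #2 answer): the even Fibonacci
    # numbers that do not exceed 4,000,000 sum to 4613732.
    assert solution() == 4613732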
| 30
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    """Count, for every possible total, how many dice rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
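    # Sanity check (published Project Euler #205 answer, quoted here as an
    # assumption for illustration): Peter's win probability rounds to 0.5731441.
    #     assert solution() == 0.5731441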
| 30
| 1
|
'''simple docstring'''
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_02_17_66_34e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.3_5_5_8_1_8,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
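    # Worked examples against the table above:
    #     energy_conversion("joule", "kilojoule", 1)     -> 0.001
    #     energy_conversion("kilowatthour", "joule", 1)  -> 3600000.0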
| 4
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
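# Hedged usage sketch (comments only; assumes `transformers` with its audio
# extras installed): outside the test harness the feature extractor is used
# directly, and the (1, 1024, 128) shape matches the integration test above.
#
#     from transformers import ASTFeatureExtractor
#     import numpy as np
#     fe = ASTFeatureExtractor()
#     audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
#     features = fe(audio, sampling_rate=16000, return_tensors="np")
#     print(features.input_values.shape)  # (1, 1024, 128) log-mel patches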
| 4
| 1
|
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place, locating each insertion point by binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
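    # Worked example: binary search only finds the insertion point; elements
    # are still shifted one by one, so moves stay O(n^2) while comparisons
    # drop to O(n log n).
    print(binary_insertion_sort([5, 2, 4, 6, 1, 3]))  # [1, 2, 3, 4, 5, 6]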
| 357
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    """Samples state-action trajectories from a diffusion model, guided by a learned value function."""

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # overwrite the conditioned timesteps (here: the current state) in every trajectory
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
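# Hedged usage sketch (comments only; requires d4rl-style environments and the
# value-function/unet checkpoints from the diffusers RL example; names below
# are illustrative assumptions, not guaranteed to exist):
#
#     import gym
#     env = gym.make("hopper-medium-v2")
#     pipeline = ValueGuidedRLPipeline.from_pretrained(
#         "bglick13/hopper-medium-v2-value-function-hor32", env=env
#     )
#     obs = env.reset()
#     action = pipeline(obs, planning_horizon=32)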
| 26
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (an expression in the variable `x`) via Newton-Raphson."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find root of the logarithmic function (i.e. Euler's number e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
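    # One more hedged example: the root of x**2 - 5 is sqrt(5) ~ 2.2360679,
    # which makes a handy quick check of the iteration.
    print(f'''The root of x**2 - 5 = 0 is {newton_raphson('x**2 - 5', 2)}''')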
| 185
|
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to isqrt(number); candidates below start at 7, so 0/1 need no special case."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime of the form (n + 1)**3 - n**3."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
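    # The candidates 7, 19, 37, 61, ... are differences of consecutive cubes,
    # (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, i.e. cuban prime candidates. The
    # published Project Euler #131 count below one million is 173 (quoted here
    # as an assumption for illustration):
    #     assert solution() == 173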
| 185
| 1
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
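# Hedged usage sketch (comments only; assumes the public diffusers API and an
# illustrative hub checkpoint): concrete schedulers combine SchedulerMixin with
# ConfigMixin, so `from_pretrained` above resolves scheduler_config.json and
# `compatibles` lists drop-in replacement classes.
#
#     from diffusers import DDPMScheduler
#     scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256", subfolder="scheduler")
#     print([cls.__name__ for cls in scheduler.compatibles])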
| 350
|
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
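    # Note (assumption based on the public zenquotes.io documentation): both
    # endpoints return a JSON list of objects shaped roughly like
    #     [{"q": "<quote>", "a": "<author>", "h": "<html rendering>"}]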
| 265
| 0
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Quick sort `a[start:end + 1]` in place; return the number of comparisons."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition around a random pivot; return (pivot index, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
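# Deterministic follow-up example (pivots are random, so only the sorted order
# is stable, not the comparison count):
example = [3, 1, 4, 1, 5, 9, 2, 6]
print(_in_place_quick_sort(example, 0, len(example) - 1), example)  # count, [1, 1, 2, 3, 4, 5, 6, 9]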
| 66
|
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _SCREAMING_SNAKE_CASE :
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase_ :str = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCAmelCase_ :int = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase_ :str = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Dict = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = inputs["""prompt"""]
lowerCAmelCase_ :Optional[int] = inputs["""generator"""]
lowerCAmelCase_ :Any = inputs["""num_inference_steps"""]
lowerCAmelCase_ :Optional[int] = inputs["""output_type"""]
if "image" in inputs:
lowerCAmelCase_ :List[Any] = inputs["""image"""]
else:
lowerCAmelCase_ :int = None
if "mask_image" in inputs:
lowerCAmelCase_ :List[Any] = inputs["""mask_image"""]
else:
lowerCAmelCase_ :int = None
if "original_image" in inputs:
lowerCAmelCase_ :List[Any] = inputs["""original_image"""]
else:
lowerCAmelCase_ :List[Any] = None
lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A )
# inputs with prompt converted to embeddings
lowerCAmelCase_ :List[str] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCAmelCase_ :int = image
if mask_image is not None:
lowerCAmelCase_ :Tuple = mask_image
if original_image is not None:
lowerCAmelCase_ :Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__A , __A , __A )
lowerCAmelCase_ :Optional[int] = pipe(**__A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A )
pipe_loaded.to(__A )
pipe_loaded.set_progress_bar_config(disable=__A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""]
lowerCAmelCase_ :Any = inputs["""num_inference_steps"""]
lowerCAmelCase_ :Tuple = inputs["""output_type"""]
# inputs with prompt converted to embeddings
lowerCAmelCase_ :Tuple = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCAmelCase_ :Optional[int] = image
if mask_image is not None:
lowerCAmelCase_ :str = mask_image
if original_image is not None:
lowerCAmelCase_ :Tuple = original_image
lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0]
lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max()
self.assertLess(__A , 1E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Any = self.get_dummy_components()
lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Dict = pipe(**__A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__A )
lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A )
pipe_loaded.to(__A )
pipe_loaded.set_progress_bar_config(disable=__A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = pipe_loaded(**__A )[0]
lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max()
self.assertLess(__A , 1E-4 )
| 84
| 0
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ : List[Any] = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = DebertaVaTokenizer
__UpperCamelCase : Any = DebertaVaTokenizerFast
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : Optional[Any] = True
def lowerCAmelCase__ ( self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase_: List[str] = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[int] ):
UpperCamelCase_: Tuple = """this is a test"""
UpperCamelCase_: Optional[Any] = """this is a test"""
return input_text, output_text
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: List[str] = """<pad>"""
UpperCamelCase_: List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 3_0001 )
def lowerCAmelCase__ ( self : int ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def lowerCAmelCase__ ( self : Dict ):
# fmt: off
UpperCamelCase_: Dict = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase_: Any = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
UpperCamelCase_: Any = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
UpperCamelCase_: Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Tuple = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
UpperCamelCase_: Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCAmelCase__ ( self : str ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCAmelCase__ ( self : Optional[int] ):
pass
def lowerCAmelCase__ ( self : Optional[int] ):
# fmt: off
UpperCamelCase_: Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase_: List[str] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase_: List[str] = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Optional[int] = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] ):
# fmt: off
UpperCamelCase_: List[str] = """I was born in 92000, and this is falsé."""
UpperCamelCase_: Dict = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase_: List[Any] = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: int = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Union[str, Any] = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
# fmt: off
UpperCamelCase_: int = """I was born in 92000, and this is falsé."""
UpperCamelCase_: List[str] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase_: Optional[Any] = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: List[str] = """I was born in 92000, and this is falsé."""
UpperCamelCase_: Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase_: Union[str, Any] = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: str = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
# fmt: off
UpperCamelCase_: Union[str, Any] = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase_: List[Any] = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
UpperCamelCase_: Union[str, Any] = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
UpperCamelCase_: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Optional[int] = self.get_tokenizer()
UpperCamelCase_: Tuple = self.get_rust_tokenizer()
UpperCamelCase_: List[Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase_: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
UpperCamelCase_: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Any = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: str = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Any = self.get_rust_tokenizer()
UpperCamelCase_: Optional[int] = tokenizer.encode(snake_case_ )
UpperCamelCase_: Dict = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Any = """This is a test"""
UpperCamelCase_: Union[str, Any] = [13, 1, 4398, 25, 21, 1289]
UpperCamelCase_: Union[str, Any] = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase_: List[Any] = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase_: Dict = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
UpperCamelCase_: Union[str, Any] = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
UpperCamelCase_: str = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Dict = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Dict = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Optional[int] = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
UpperCamelCase_: int = """I was born in 92000, and this is falsé."""
UpperCamelCase_: Any = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
UpperCamelCase_: Any = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
UpperCamelCase_: Any = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Any = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Any = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Union[str, Any] = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[Any] = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: List[str] = DebertaVaTokenizer(snake_case_ )
UpperCamelCase_: Optional[Any] = tokenizer.encode("""sequence builders""" )
UpperCamelCase_: Tuple = tokenizer.encode("""multi-sequence build""" )
UpperCamelCase_: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[int] = {"""input_ids""": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 365
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns true if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
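# Hedged invocation sketch (comments only; flag names come from the
# `HumanEvalArguments` dataclass that ships alongside this script, and the
# checkpoint name is illustrative):
#
#     accelerate launch human_eval.py \
#         --model_ckpt codeparrot/codeparrot-small \
#         --do_sample True --temperature 0.2 --top_p 0.95 \
#         --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1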
| 223
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    """Configuration class for Megatron-BERT models; mirrors BertConfig."""

    model_type = "megatron-bert"

    def __init__(
        self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
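# Hedged usage sketch (comments only): the defaults above correspond to the
# Megatron-BERT-345M setup, so a standalone instantiation looks like
#
#     from transformers import MegatronBertConfig
#     config = MegatronBertConfig()
#     assert config.model_type == "megatron-bert" and config.hidden_size == 1024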
| 30
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Move (and, for bitsandbytes parameters, quantize) a single tensor of `module` onto `device`."""
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """Recursively swap `nn.Linear`/`Conv1D` children for bitsandbytes quantized linear layers."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
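

# End-to-end sketch (added for illustration; the checkpoint name and flow are
# assumptions, not part of the original file). The usual entry point for these
# helpers is `from_pretrained(..., quantization_config=...)`, which calls
# `replace_with_bnb_linear` and `set_module_quantized_tensor_to_device` internally.
def _demo_load_in_8bit():
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    quantization_config = BitsAndBytesConfig(load_in_8bit=True)
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m",  # hypothetical choice; any causal LM checkpoint works
        device_map="auto",
        quantization_config=quantization_config,
    )
    # Modules that were kept in full precision (typically the LM head):
    print(get_keys_to_not_convert(model))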
| 30
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to enable dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
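

# Small self-contained illustration (added; not in the original script) of the
# length computation used inside `get_lens`: counting non-pad tokens per row.
def _demo_length_counting():
    import torch

    pad = 0
    input_ids = torch.tensor([[5, 6, 7, pad], [5, pad, pad, pad]])
    # .ne(pad) marks real tokens; summing over dim 1 gives per-example lengths.
    print(input_ids.ne(pad).sum(1).tolist())  # -> [3, 1]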
| 357
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
_lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_lowercase = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_lowercase = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
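

# Quick illustration (added; not part of the original check) of what
# `_re_checkpoint` extracts from a docstring fragment:
def _demo_checkpoint_regex():
    text = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    print(_re_checkpoint.findall(text))
    # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]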
| 229
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
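

# Illustrative wiring sketch (added; not part of the original file): how this
# parser plugs into a multi-command CLI via argparse subparsers.
def _demo_subcommand_wiring():
    parser = argparse.ArgumentParser("accelerate")
    subparsers = parser.add_subparsers()
    test_command_parser(subparsers=subparsers)
    args = parser.parse_args(["test"])
    # args.func(args)  # would dispatch to `test_command` and launch the test script
    print(args)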
| 272
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
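

# Standalone usage sketch (added for illustration; this tiny random model is the
# same one the tests above use, so the text output is meaningless - it simply
# exercises the API):
def _demo_text2text_pipeline():
    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    print(generator("translate English to German: Hello", do_sample=False))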
| 26
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
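

# Composition sketch (added for illustration; not part of the original module):
# building a `Blip2Config` from explicitly constructed sub-configs. All values
# shown are the defaults.
def _demo_blip2_config():
    from transformers import OPTConfig

    vision = Blip2VisionConfig()
    qformer = Blip2QFormerConfig()
    text = OPTConfig()
    config = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text, num_query_tokens=32)
    print(config.qformer_config.encoder_hidden_size)  # mirrors the vision hidden size (1408)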
| 351
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 209
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
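

# Generic illustration (added; not part of this module) of the
# optional-dependency pattern used above, reduced to its essence:
def _demo_optional_dependency():
    try:
        import sentencepiece  # noqa: F401

        has_sentencepiece = True
    except ImportError:
        has_sentencepiece = False
    print(f"sentencepiece available: {has_sentencepiece}")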
| 265
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
# The class name below is reconstructed from context (a community pipeline that
# reuses a fixed-size reference latent so the same seed yields similar images
# across output sizes) and should be treated as an assumption.
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Union[str, int, None] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        latents_reference: Optional[torch.Tensor] = None,  # assumed argument; the garbled source is ambiguous here
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
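

# Usage sketch (added for illustration; both the checkpoint name and the
# `custom_pipeline` id are assumptions - community pipelines are normally
# loaded through the `custom_pipeline` argument of `from_pretrained`):
def _demo_custom_pipeline():
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",  # hypothetical base checkpoint
        custom_pipeline="seed_resize_stable_diffusion",  # hypothetical pipeline id
    ).to("cuda")
    image = pipe("a fantasy landscape", height=512, width=768).images[0]
    image.save("landscape.png")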
| 265
| 1
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree_root: TreeNode | None) -> bool:
    """
    Returns True if the tree is a valid binary search tree; raises ValueError if
    any node is not a TreeNode or its data cannot be interpreted as a float.
    """

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree_root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree_root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
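

# Worked example (added for illustration): the tree 2.0/(1.0, 3.0) is a valid
# BST, while swapping the children violates the ordering invariant.
def _demo_is_binary_search_tree():
    good = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    bad = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    print(is_binary_search_tree(good))  # True
    print(is_binary_search_tree(bad))  # False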
| 358
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Returns (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Returns the unique n modulo n1*n2 with n % n1 == r1 and n % n2 == r2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Returns the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
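

# Worked example (added for illustration): find n with n % 5 == 1 and
# n % 7 == 3. Both implementations agree on 31 (31 % 5 == 1, 31 % 7 == 3).
def _demo_crt():
    print(chinese_remainder_theorem(5, 1, 7, 3))   # 31
    print(chinese_remainder_theorem2(5, 1, 7, 3))  # 31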
| 117
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
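

# Illustrative sketch (added; not part of the test suite): enabling RoPE
# scaling on a config directly, mirroring what the parameterized test does.
def _demo_rope_scaling_config():
    config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)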
| 252
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_dump(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
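

# Worked example (added for illustration) of the squad-style metrics above:
def _demo_metrics():
    print(normalize_answer("The Cat, sat!"))          # -> "cat sat"
    print(f1_score("the cat sat", "a cat sat down"))  # -> 0.8 (token-overlap F1)
    print(calculate_exact_match(["cat sat"], ["The cat sat!"]))  # -> {"em": 1.0}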
| 223
| 0
|
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 24
|
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    # Cheap scalar fingerprint of the weights, used to detect (re)loads below.
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
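
# Hedged smoke test (illustrative addition, not part of the original test file):
# the checkpointing tests below depend on get_signature changing once
# load_random_weights replaces the parameters.
#
#     model, *_ = create_components()
#     before = get_signature(model)
#     load_random_weights(model)
#     assert abs(before - get_signature(model)) > 1e-3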
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_8bit=a_ , llm_int8_enable_fp32_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
| 24
| 1
|
"""simple docstring"""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex (sample inputs sized for the default SD UNet)
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 46
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
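
# Minimal smoke test (illustrative addition, not in the original helpers): the
# regression dataset/model pair above can be exercised without Accelerate.
# Note the model also prints its dtype banner on the first batch.
if __name__ == "__main__":
    ds = RegressionDataset(length=8, seed=0)
    dl = DataLoader(ds, batch_size=4)
    model = RegressionModel(a=2, b=3)
    batch = next(iter(dl))
    print(model(batch["x"]).shape)  # torch.Size([4])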
| 229
| 0
|
"""simple docstring"""
def longest_distance(graph):
    """Longest path (in vertices) in a DAG, via Kahn's topological ordering."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
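# For the adjacency list above, the longest path visits 5 vertices
# (e.g. 0 -> 2 -> 5 -> 6 -> 7), so the call prints 5.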
| 365
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 2e-3
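
# Illustrative usage (added sketch; not part of the original module, which is
# meant to be imported from transformers rather than run directly):
#
#     config = PoolFormerConfig()
#     onnx_config = PoolFormerOnnxConfig(config)
#     print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', ...})])
#     print(onnx_config.atol_for_validation)  # 0.002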
| 73
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 223
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
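
# Illustrative usage (added sketch, not in the original module): pairs follow the
# XLNet layout `A <sep> B <sep> <cls>`, with segment id 2 reserved for <cls>.
#
#     tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     tok.build_inputs_with_special_tokens([10, 11], [20])
#     # -> [10, 11, sep_id, 20, sep_id, cls_id]
#     tok.create_token_type_ids_from_sequences([10, 11], [20])
#     # -> [0, 0, 0, 1, 1, 2]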
| 209
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
A__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , """image_mean""" ) )
self.assertTrue(hasattr(A__ , """image_std""" ) )
self.assertTrue(hasattr(A__ , """do_normalize""" ) )
self.assertTrue(hasattr(A__ , """do_rescale""" ) )
self.assertTrue(hasattr(A__ , """rescale_factor""" ) )
self.assertTrue(hasattr(A__ , """do_resize""" ) )
self.assertTrue(hasattr(A__ , """size""" ) )
self.assertTrue(hasattr(A__ , """do_pad""" ) )
    def test_image_processor_from_dict_with_kwargs(self):
A__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , A__ )
A__ : List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , A__ )
    def test_batch_feature(self):
pass
    def test_call_pil(self):
# Initialize image_processing
A__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
A__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ : Union[str, Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
A__ : int = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
# Initialize image_processing
A__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
A__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ : Union[str, Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ : Optional[Any] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
A__ , A__ : Optional[int] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
# Initialize image_processing
A__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
A__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ : str = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ : int = image_processing(A__ , return_tensors="""pt""" ).pixel_values
A__ , A__ : Tuple = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
A__ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ : Optional[Any] = json.loads(f.read() )
A__ : Dict = {"""image_id""": 3_9769, """annotations""": target}
# encode them
A__ : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
A__ : Dict = image_processing(images=A__ , annotations=A__ , return_tensors="""pt""" )
# verify pixel values
A__ : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , A__ )
A__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A__ , atol=1e-4 ) )
# verify area
A__ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A__ ) )
# verify boxes
A__ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A__ )
A__ : str = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A__ , atol=1e-3 ) )
# verify image_id
A__ : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A__ ) )
# verify is_crowd
A__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A__ ) )
# verify class_labels
A__ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A__ ) )
# verify orig_size
A__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A__ ) )
# verify size
A__ : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
A__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ : Union[str, Any] = json.loads(f.read() )
A__ : Dict = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
A__ : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ : Union[str, Any] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
A__ : Dict = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="""pt""" )
# verify pixel values
A__ : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , A__ )
A__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A__ , atol=1e-4 ) )
# verify area
A__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A__ ) )
# verify boxes
A__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A__ )
A__ : Tuple = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A__ , atol=1e-3 ) )
# verify image_id
A__ : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A__ ) )
# verify is_crowd
A__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A__ ) )
# verify class_labels
A__ : int = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A__ ) )
# verify masks
A__ : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A__ )
# verify orig_size
A__ : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A__ ) )
# verify size
A__ : Optional[int] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A__ ) )
| 141
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
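
# Worked example (illustrative addition): values (60, 100, 120) with weights
# (10, 20, 30) and capacity 50 -> items 0 and 1 are taken whole and 2/3 of
# item 2 is taken, so
#     fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
# returns (240.0, [1, 1, 0.666...]).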
| 141
| 1
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__snake_case :Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : CLIPTextModel , __SCREAMING_SNAKE_CASE : CLIPTokenizer , __SCREAMING_SNAKE_CASE : Transformer2DModel , __SCREAMING_SNAKE_CASE : VQDiffusionScheduler , __SCREAMING_SNAKE_CASE : LearnedClassifierFreeSamplingEmbeddings , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=__SCREAMING_SNAKE_CASE , transformer=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = len(__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else 1
# get prompt text embeddings
__a = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__a = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F' {self.tokenizer.model_max_length} tokens: {removed_text}')
__a = text_input_ids[:, : self.tokenizer.model_max_length]
__a = self.text_encoder(text_input_ids.to(self.device))[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__a = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__SCREAMING_SNAKE_CASE)
# duplicate text embeddings for each generation per prompt
__a = prompt_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0)
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__a = self.learned_classifier_free_sampling_embeddings.embeddings
__a = negative_prompt_embeds.unsqueeze(0).repeat(__SCREAMING_SNAKE_CASE , 1 , 1)
else:
__a = [''''''] * batch_size
__a = text_input_ids.shape[-1]
__a = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
__a = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# See comment for normalizing text embeddings
__a = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__SCREAMING_SNAKE_CASE)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a = negative_prompt_embeds.shape[1]
__a = negative_prompt_embeds.repeat(1 , __SCREAMING_SNAKE_CASE , 1)
__a = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __SCREAMING_SNAKE_CASE , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
@torch.no_grad()
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 100 , __SCREAMING_SNAKE_CASE : float = 5.0 , __SCREAMING_SNAKE_CASE : float = 1.0 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = 1
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = len(__SCREAMING_SNAKE_CASE)
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(__SCREAMING_SNAKE_CASE)}')
__a = batch_size * num_images_per_prompt
__a = guidance_scale > 1.0
__a = self._encode_prompt(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(__SCREAMING_SNAKE_CASE)}.')
# get the initial completely masked latents unless the user supplied it
__a = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__a = self.transformer.num_vector_embeds - 1
__a = torch.full(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).to(self.device)
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
F' {self.transformer.num_vector_embeds - 1} (inclusive).')
__a = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.scheduler.timesteps.to(self.device)
__a = latents
for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE)):
# expand the sample if we are doing classifier free guidance
__a = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__a = self.transformer(__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE).sample
if do_classifier_free_guidance:
__a , __a = model_output.chunk(2)
__a = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__SCREAMING_SNAKE_CASE , dim=1 , keepdim=__SCREAMING_SNAKE_CASE)
__a = self.truncate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# remove `log(0)`'s (`-inf`s)
__a = model_output.clamp(-70)
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , sample=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.vqvae.config.vq_embed_dim
__a = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__a = self.vqvae.quantize.get_codebook_entry(__SCREAMING_SNAKE_CASE , shape=__SCREAMING_SNAKE_CASE)
__a = self.vqvae.decode(__SCREAMING_SNAKE_CASE , force_not_quantize=__SCREAMING_SNAKE_CASE).sample
__a = (image / 2 + 0.5).clamp(0 , 1)
__a = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : float):
'''simple docstring'''
__a , __a = torch.sort(__SCREAMING_SNAKE_CASE , 1 , descending=__SCREAMING_SNAKE_CASE)
__a = torch.exp(__SCREAMING_SNAKE_CASE)
__a = sorted_p_x_0.cumsum(dim=1) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__a = torch.full_like(keep_mask[:, 0:1, :] , __SCREAMING_SNAKE_CASE)
__a = torch.cat((all_true, keep_mask) , dim=1)
__a = keep_mask[:, :-1, :]
__a = keep_mask.gather(1 , indices.argsort(1))
__a = log_p_x_0.clone()
__a = -torch.inf # -inf = log(0)
return rv
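
# Hedged usage sketch (illustrative addition, not from the original file): the
# pipeline is normally loaded from a pretrained repo and called with a prompt.
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("teddy bear playing in the pool", num_inference_steps=50).images[0]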
| 49
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
snake_case__ : Dict = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
snake_case__ : Optional[int] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def _a ( lowerCamelCase: List[Any] , lowerCamelCase: Any , lowerCamelCase: Union[str, Any] , lowerCamelCase: Any , lowerCamelCase: int ) -> List[str]:
'''simple docstring'''
for attribute in key.split('''.''' ):
__A = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
__A = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
__A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__A = value
elif weight_type == "weight_g":
__A = value
elif weight_type == "weight_v":
__A = value
elif weight_type == "bias":
__A = value
else:
__A = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _a ( lowerCamelCase: List[str] , lowerCamelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
__A = []
__A = fairseq_model.state_dict()
__A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__A = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
__A = True
else:
for key, mapped_key in MAPPING.items():
__A = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__A = True
if "*" in mapped_key:
__A = name.split(lowerCamelCase )[0].split('''.''' )[-2]
__A = mapped_key.replace('''*''' , lowerCamelCase )
if "weight_g" in name:
__A = '''weight_g'''
elif "weight_v" in name:
__A = '''weight_v'''
elif "bias" in name:
__A = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A = '''weight'''
else:
__A = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _a ( lowerCamelCase: int , lowerCamelCase: Any , lowerCamelCase: int , lowerCamelCase: int , lowerCamelCase: List[str] ) -> Union[str, Any]:
'''simple docstring'''
__A = full_name.split('''conv_layers.''' )[-1]
__A = name.split('''.''' )
__A = int(items[0] )
__A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__A = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__A = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__A = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__A = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCamelCase )
@torch.no_grad()
def _a ( lowerCamelCase: Tuple , lowerCamelCase: int , lowerCamelCase: Optional[Any]=None , lowerCamelCase: Optional[Any]=None , lowerCamelCase: Optional[int]=True ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
__A = UniSpeechSatConfig.from_pretrained(lowerCamelCase )
else:
__A = UniSpeechSatConfig()
__A = ''''''
if is_finetuned:
__A = UniSpeechSatForCTC(lowerCamelCase )
else:
__A = UniSpeechSatForPreTraining(lowerCamelCase )
__A , __A , __A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__A = model[0].eval()
recursively_load_weights(lowerCamelCase , lowerCamelCase )
hf_wavavec.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ : Any = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
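
# Example invocation (added for illustration; the script filename and paths are
# placeholders, only the flags come from the argparse definition above):
#
#     python convert_unispeech_sat_checkpoint.py \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --pytorch_dump_folder_path /path/to/output_dir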
| 117
| 0
|
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
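
# Illustrative note (added, not part of the original shim): instantiating the
# class emits a FutureWarning but otherwise behaves like LayoutLMv2ImageProcessor.
#
#     import warnings as _w
#     with _w.catch_warnings(record=True) as caught:
#         _w.simplefilter("always")
#         LayoutLMv2FeatureExtractor()
#         assert issubclass(caught[-1].category, FutureWarning)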
| 370
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 262
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: swap coordinates so x1 <= x2 and y1 <= y2
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
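# Standalone sketch (illustrative, not part of the test suite) of the bbox
# "legalization" performed in prepare_config_and_inputs above: coordinates are
# swapped so x1 <= x2 and y1 <= y2 always hold for a [x1, y1, x2, y2] box.
def _legalize_bbox_demo(box):
    x1, y1, x2, y2 = box
    if x2 < x1:
        x1, x2 = x2, x1
    if y2 < y1:
        y1, y2 = y2, y1
    return [x1, y1, x2, y2]


assert _legalize_bbox_demo([5, 9, 2, 3]) == [2, 3, 5, 9]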
| 24
|
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
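# Hedged sketch of the matching server side (illustrative only; the original
# repository's server script is not shown here). It accepts one client, reads
# the greeting, streams a file back in 1 KiB chunks, and closes the socket so
# the client's recv() loop sees EOF:
#
#   import socket
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _ = server.accept()
#   print(conn.recv(1024))  # b"Hello server!"
#   with open("file_to_send", "rb") as in_file:
#       while chunk := in_file.read(1024):
#           conn.send(chunk)
#   conn.close()  # EOF ends the client's receive loop
#   server.close()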
| 24
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
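# Usage sketch (illustrative): because the module object is swapped for a
# _LazyModule, the sentencepiece-backed tokenizer is only imported when first
# accessed:
#
#   from transformers.models.mluke import MLukeTokenizer  # triggers the lazy import
#   tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")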
| 96
|
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
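# Vectorized sketch of the same cumulative-histogram mapping using a numpy
# lookup table (illustrative; equivalent in spirit to stretch() above, not a
# drop-in replacement for it):
def stretch_lut(img, levels=256):
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()  # running sum of p(r_k)
    lut = np.rint((levels - 1) * cdf).astype("uint8")
    return lut[img]  # remap every pixel through the lookup table


assert stretch_lut(np.zeros((2, 2), dtype="uint8")).shape == (2, 2)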
| 96
| 1
|
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Reflect the beam off the ellipse at (point_x, point_y) and return the next point of incidence."""
    # gradient of the normal at the point of incidence on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count reflections until the beam exits through the slot at the top of the ellipse."""
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
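# Quick numeric sanity check (illustrative, not from the original solution):
# the starting point (1.4, -9.6) lies on the ellipse 4x^2 + y^2 = 100, and one
# reflection step must land on the ellipse again.
assert isclose(4 * 1.4**2 + (-9.6) ** 2, 100.0)
_x, _y, _ = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
assert isclose(4 * _x**2 + _y**2, 100.0, rel_tol=1e-6)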
| 79
|
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
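# Usage sketch via torch.hub (hedged: entry-point names are the functions
# defined above; network access and the bert-base-uncased weights are assumed):
#
#   import torch
#
#   tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")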
| 73
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCamelCase__ = ''''''
lowerCamelCase__ = ''''''
lowerCamelCase__ = ''''''
lowerCamelCase__ = 1 # (0 is vertical, 1 is horizontal)
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase : str = get_dataset(__lowerCAmelCase ,__lowerCAmelCase )
print("Processing..." )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Tuple = update_image_and_anno(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
for index, image in enumerate(__lowerCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : Optional[Any] = random_chars(32 )
_UpperCamelCase : Optional[Any] = paths[index].split(os.sep )[-1].rsplit("." ,1 )[0]
_UpperCamelCase : int = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''/{file_root}.jpg''' ,__lowerCAmelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' )
_UpperCamelCase : Optional[int] = []
for anno in new_annos[index]:
_UpperCamelCase : Any = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__lowerCAmelCase )
with open(F'''/{file_root}.txt''' ,"w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = []
_UpperCamelCase : str = []
for label_file in glob.glob(os.path.join(__lowerCAmelCase ,"*.txt" ) ):
_UpperCamelCase : List[Any] = label_file.split(os.sep )[-1].rsplit("." ,1 )[0]
with open(__lowerCAmelCase ) as in_file:
_UpperCamelCase : Any = in_file.readlines()
_UpperCamelCase : int = os.path.join(__lowerCAmelCase ,F'''{label_name}.jpg''' )
_UpperCamelCase : Optional[Any] = []
for obj_list in obj_lists:
_UpperCamelCase : List[str] = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__lowerCAmelCase )
labels.append(__lowerCAmelCase )
return img_paths, labels
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : Optional[int] = []
for idx in range(len(__lowerCAmelCase ) ):
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : str = img_list[idx]
path_list.append(__lowerCAmelCase )
_UpperCamelCase : Dict = anno_list[idx]
_UpperCamelCase : Union[str, Any] = cva.imread(__lowerCAmelCase )
if flip_type == 1:
_UpperCamelCase : Tuple = cva.flip(__lowerCAmelCase ,__lowerCAmelCase )
for bbox in img_annos:
_UpperCamelCase : Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_UpperCamelCase : Dict = cva.flip(__lowerCAmelCase ,__lowerCAmelCase )
for bbox in img_annos:
_UpperCamelCase : Any = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__lowerCAmelCase )
new_imgs_list.append(__lowerCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def lowercase__ ( lowercase_ = 32 ) -> Union[str, Any]:
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
_UpperCamelCase : List[Any] = ascii_lowercase + digits
return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 371
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
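# Usage sketch (illustrative; assumes the usual RAG pairing of a DPR question
# encoder with a BART generator, as in the stock facebook/rag-* checkpoints):
#
#   from transformers import BartConfig, DPRConfig, RagConfig
#
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, index_name="exact"
#   )
#   assert rag_config.generator.model_type == "bart"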
| 310
| 0
|
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax checkpoint weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
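# Numpy-only sketch (illustrative) of the conv-kernel layout conversion applied
# above: Flax stores 2D conv kernels as (H, W, C_in, C_out) while PyTorch
# expects (C_out, C_in, H, W), hence the transpose with axes (3, 2, 0, 1).
_flax_kernel = np.zeros((3, 3, 16, 32))  # (H, W, C_in, C_out)
_pt_weight = np.transpose(_flax_kernel, (3, 2, 0, 1))
assert _pt_weight.shape == (32, 16, 3, 3)  # (C_out, C_in, H, W)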
| 141
|
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # always pad every candidate to max_length so the outputs can be stacked
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
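# Usage sketch for batch_encode_candidates (hedged: the output shape is my
# reading of the code above, which pads every candidate to max_length and
# stacks them per example):
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"], ["The cube root of x.", "Its average is y."]],
#       max_length=10,
#       return_tensors="pt",
#   )
#   print(batch.input_ids.shape)  # expected (2, 2, 10): (batch, num_candidates, seq_len)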
| 141
| 1
|
"""simple docstring"""
def A ( _lowercase ):
if not isinstance(_lowercase , _lowercase ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
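# Worked example (illustrative): 28 is a perfect number, so the sum of its
# proper divisors 1 + 2 + 4 + 7 + 14 equals the number itself.
assert sum_of_divisors(28) == 28
assert sum_of_divisors(7) == 1  # a prime only has the trivial proper divisor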
| 371
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    # Student-side key names below follow the DistilBERT state-dict layout; the
    # garbled original erased them, so they are restored from the upstream
    # extraction script.
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
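    # Illustrative sanity check (not part of the original script) of the
    # teacher->student layer selection: teacher layers [0, 2, 4, 7, 9, 11] map,
    # in order, onto student layers 0..5.
    _layer_map = dict(enumerate([0, 2, 4, 7, 9, 11]))
    assert _layer_map == {0: 0, 1: 2, 2: 4, 3: 7, 4: 9, 5: 11}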
| 258
| 0
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class __A ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase : int = """encodec"""
def __init__( self : str ,_snake_case : Optional[int]=[1.5, 3.0, 6.0, 12.0, 24.0] ,_snake_case : int=24_000 ,_snake_case : Tuple=1 ,_snake_case : int=False ,_snake_case : Optional[Any]=None ,_snake_case : Any=None ,_snake_case : Any=128 ,_snake_case : List[Any]=32 ,_snake_case : Union[str, Any]=1 ,_snake_case : List[Any]=[8, 5, 4, 2] ,_snake_case : Optional[Any]="weight_norm" ,_snake_case : List[Any]=7 ,_snake_case : Tuple=7 ,_snake_case : Dict=3 ,_snake_case : Optional[Any]=2 ,_snake_case : List[str]=True ,_snake_case : Any="reflect" ,_snake_case : Optional[int]=2 ,_snake_case : Tuple=2 ,_snake_case : List[str]=1.0 ,_snake_case : Dict=1_024 ,_snake_case : str=None ,_snake_case : Any=True ,**_snake_case : Any ,) -> Optional[int]:
"""simple docstring"""
lowercase__ : Tuple = target_bandwidths
lowercase__ : Optional[Any] = sampling_rate
lowercase__ : List[Any] = audio_channels
lowercase__ : int = normalize
lowercase__ : Union[str, Any] = chunk_length_s
lowercase__ : int = overlap
lowercase__ : Tuple = hidden_size
lowercase__ : Optional[int] = num_filters
lowercase__ : List[str] = num_residual_layers
lowercase__ : int = upsampling_ratios
lowercase__ : List[Any] = norm_type
lowercase__ : List[str] = kernel_size
lowercase__ : Union[str, Any] = last_kernel_size
lowercase__ : Any = residual_kernel_size
lowercase__ : str = dilation_growth_rate
lowercase__ : int = use_causal_conv
lowercase__ : Any = pad_mode
lowercase__ : Dict = compress
lowercase__ : Optional[int] = num_lstm_layers
lowercase__ : Union[str, Any] = trim_right_ratio
lowercase__ : Dict = codebook_size
lowercase__ : Optional[Any] = codebook_dim if codebook_dim is not None else hidden_size
lowercase__ : List[Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**__lowercase )
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
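# Worked example (illustrative) of the derived properties under the 24 kHz
# defaults: hop length = prod([8, 5, 4, 2]) = 320, so
# frame_rate = ceil(24000 / 320) = 75, and with target_bandwidths[-1] = 24.0,
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.
#
#   config = EncodecConfig()
#   assert config.frame_rate == 75
#   assert config.num_quantizers == 32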
| 16
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Tuple = json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Any = json.load(__lowercase )
lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase_ : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
__lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Any:
pass
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> List[str]:
pass
def lowercase_ ( self ) -> Dict:
# The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
# strings and special added tokens as tokens
lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
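# Hedged side note (not part of the test class above): Perceiver tokenizes raw UTF-8
# bytes plus a handful of special tokens, which is why decoding the single id 178
# earlier in this file yields the replacement character '\ufffd' -- the underlying
# byte is a lone UTF-8 continuation byte and cannot be decoded on its own. Minimal
# standard-library sketch; the special-token offset of 6 is an assumption mirroring
# Perceiver's layout, not read from the real vocab:
def _byte_ids_to_text_sketch(token_ids, num_special_tokens=6):
    raw = bytes(i - num_special_tokens for i in token_ids if i >= num_special_tokens)
    return raw.decode("utf-8", errors="replace")

assert _byte_ids_to_text_sketch([178]) == "\ufffd"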
| 262
| 0
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( __lowercase ):
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_SCREAMING_SNAKE_CASE : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_SCREAMING_SNAKE_CASE : List[Any] = str(distributed_port + 1 )
_SCREAMING_SNAKE_CASE : int = dist.new_group(ranks=__lowerCamelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase_ ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=torch.floataa ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase )
dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_SCREAMING_SNAKE_CASE : Any = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase )
return ifname
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_SCREAMING_SNAKE_CASE : Dict = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
# distributed training
_SCREAMING_SNAKE_CASE : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
_SCREAMING_SNAKE_CASE : Any = None
if self._is_main():
_SCREAMING_SNAKE_CASE : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCamelCase )]
dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group )
# scatter logic
_SCREAMING_SNAKE_CASE : Optional[int] = question_hidden_states.shape[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(__lowerCamelCase ) == world_size
_SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase )
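# Hedged, self-contained sketch of the gather -> retrieve -> scatter flow implemented
# above, run without torch.distributed so it executes anywhere (torch is already
# imported at the top of this file). world_size, the per-rank query count and the
# embedding width are made-up illustration values:
world_size, queries_per_rank, dim, n_docs = 3, 2, 4, 5
gathered = [torch.randn(queries_per_rank, dim) for _ in range(world_size)]  # what dist.gather collects on rank 0
all_states = torch.cat(gathered)                                            # rank 0 now holds every query
all_doc_embeds = torch.randn(all_states.shape[0], n_docs, dim)              # stand-in for the _main_retrieve output
chunks = list(all_doc_embeds.split(queries_per_rank))                       # per-rank chunks, as in the scatter logic
assert len(chunks) == world_size and chunks[0].shape == (queries_per_rank, n_docs, dim)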
| 365
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'BlipImageProcessor'
__snake_case = 'AutoTokenizer'
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
_SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_SCREAMING_SNAKE_CASE : Any = BatchFeature()
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" )
_SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
_SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
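# Hedged note on the save/load overrides above: the QFormer tokenizer is written to its
# own subfolder so that from_pretrained() can rebuild all three components. The resulting
# layout looks roughly like this (file names other than the "qformer_tokenizer" folder
# are assumptions based on standard save_pretrained output, not verified here):
#
#   save_directory/
#       preprocessor_config.json        # image processor
#       tokenizer_config.json, ...      # main tokenizer
#       qformer_tokenizer/              # written by the save_pretrained override
#           tokenizer_config.json, ...  # reloaded via subfolder="qformer_tokenizer"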
| 325
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""vocab_file""": """spiece.model"""}
lowercase__ = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=False , lowercase=True , lowercase=False , lowercase="<s>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<sep>" , lowercase="<pad>" , lowercase="<cls>" , lowercase="<mask>" , lowercase=["<eop>", "<eod>"] , lowercase = None , **lowercase , ):
_lowerCamelCase : Optional[Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
_lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
_lowerCamelCase : List[Any] = 3
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : Optional[Any] = remove_space
_lowerCamelCase : Union[str, Any] = keep_accents
_lowerCamelCase : int = vocab_file
_lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.' )
_lowerCamelCase : Optional[Any] = jieba
_lowerCamelCase : List[Any] = str.maketrans(' \n' , '\u2582\u2583' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def A_ ( self ):
return len(self.sp_model )
def A_ ( self ):
_lowerCamelCase : Tuple = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCamelCase : Optional[Any] = self.__dict__.copy()
_lowerCamelCase : Optional[int] = None
return state
def __setstate__( self , lowercase ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self , lowercase ):
if self.remove_space:
_lowerCamelCase : List[str] = ' '.join(inputs.strip().split() )
else:
_lowerCamelCase : Tuple = inputs
_lowerCamelCase : Optional[int] = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
_lowerCamelCase : Any = unicodedata.normalize('NFKD' , lowercase )
_lowerCamelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(lowercase )] )
if self.do_lower_case:
_lowerCamelCase : Dict = outputs.lower()
return outputs
def A_ ( self , lowercase ):
_lowerCamelCase : Tuple = self.preprocess_text(lowercase )
_lowerCamelCase : List[Any] = self.sp_model.encode(lowercase , out_type=lowercase )
_lowerCamelCase : int = []
for piece in pieces:
if len(lowercase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCamelCase : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCamelCase : Any = cur_pieces[1:]
else:
_lowerCamelCase : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowercase )
else:
new_pieces.append(lowercase )
return new_pieces
def A_ ( self , lowercase ):
return self.sp_model.PieceToId(lowercase )
def A_ ( self , lowercase ):
return self.sp_model.IdToPiece(lowercase )
def A_ ( self , lowercase ):
_lowerCamelCase : List[str] = ''.join(lowercase ).replace(lowercase , ' ' ).strip()
return out_string
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : Optional[int] = [self.sep_token_id]
_lowerCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A_ ( self , lowercase , lowercase = None , lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is not None:
return ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1, 1]
return ([0] * len(lowercase )) + [1, 1]
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : Optional[int] = [self.sep_token_id]
_lowerCamelCase : List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def A_ ( self , lowercase , lowercase = None ):
if not os.path.isdir(lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCamelCase : Optional[int] = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , 'wb' ) as fi:
_lowerCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def A_ ( self , *lowercase , **lowercase ):
_lowerCamelCase : Any = super()._decode(*lowercase , **lowercase )
_lowerCamelCase : List[Any] = text.replace(' ' , '' ).replace('\u2582' , ' ' ).replace('\u2583' , '\n' )
return text
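# Hedged illustration of the whitespace handling above: before SentencePiece sees the
# text, spaces and newlines are mapped to the printable placeholders \u2582/\u2583 via
# the translator built in __init__, and _decode() maps them back (after first dropping
# the spaces jieba inserts between words). Standard library only:
_translator_sketch = str.maketrans(" \n", "\u2582\u2583")
encoded = "你好 世界\n".translate(_translator_sketch)
assert encoded == "你好\u2582世界\u2583"
assert encoded.replace("\u2582", " ").replace("\u2583", "\n") == "你好 世界\n"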
| 96
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase ):
_lowerCamelCase : Any = data
_lowerCamelCase : Node | None = None
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self ):
_lowerCamelCase : str = None
_lowerCamelCase : str = None
def __iter__( self ):
_lowerCamelCase : List[str] = self.head
while self.head:
yield node.data
_lowerCamelCase : Optional[int] = node.next
if node == self.head:
break
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join(str(lowercase ) for item in iter(self ) )
def A_ ( self , lowercase ):
self.insert_nth(len(self ) , lowercase )
def A_ ( self , lowercase ):
self.insert_nth(0 , lowercase )
def A_ ( self , lowercase , lowercase ):
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : List[Any] = Node(lowercase )
if self.head is None:
_lowerCamelCase : str = new_node # first node points to itself
_lowerCamelCase : Union[str, Any] = new_node
elif index == 0: # insert at head
_lowerCamelCase : List[str] = self.head
_lowerCamelCase : str = new_node
else:
_lowerCamelCase : Union[str, Any] = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : Union[str, Any] = temp.next
_lowerCamelCase : List[str] = new_node
if index == len(self ) - 1: # insert at tail
_lowerCamelCase : Any = new_node
def A_ ( self ):
return self.delete_nth(0 )
def A_ ( self ):
return self.delete_nth(len(self ) - 1 )
def A_ ( self , lowercase = 0 ):
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : Any = self.head
if self.head == self.tail: # just one node
_lowerCamelCase : List[str] = None
elif index == 0: # delete head node
_lowerCamelCase : List[str] = self.tail.next.next
_lowerCamelCase : Optional[int] = self.head.next
else:
_lowerCamelCase : Dict = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : int = temp.next
_lowerCamelCase : Optional[int] = temp.next.next
if index == len(self ) - 1: # delete at tail
_lowerCamelCase : List[Any] = temp
return delete_node.data
def A_ ( self ):
return len(self ) == 0
def _snake_case ( ):
_lowerCamelCase : Union[str, Any] = CircularLinkedList()
assert len(lowercase__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(lowercase__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(lowercase__ ) == i
circular_linked_list.insert_nth(lowercase__ , i + 1 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
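# Quick usage sketch mirroring the assertions above (no new API is assumed). Note that
# insert_nth and delete_nth walk the ring from the head, so both are O(n) worst case:
#   cll = CircularLinkedList()
#   cll.insert_tail(1); cll.insert_tail(2); cll.insert_head(0)
#   print(cll)            # 0->1->2
#   cll.delete_front()    # -> 0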
| 96
| 1
|
import os
def lowerCAmelCase__ ( ):
snake_case_ : List[Any] = os.path.dirname(os.path.realpath(__UpperCAmelCase ) )
snake_case_ : List[Any] = os.path.join(__UpperCAmelCase , "triangle.txt" )
with open(__UpperCAmelCase ) as f:
snake_case_ : List[str] = f.readlines()
snake_case_ : Dict = []
for line in triangle:
snake_case_ : List[Any] = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(__UpperCAmelCase ) )
a.append(__UpperCAmelCase )
for i in range(1 , len(__UpperCAmelCase ) ):
for j in range(len(a[i] ) ):
snake_case_ : Optional[int] = a[i - 1][j] if j != len(a[i - 1] ) else 0
snake_case_ : Dict = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(__UpperCAmelCase , __UpperCAmelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
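# Hedged worked example of the same bottom-up recurrence on a tiny hand-made triangle
# (solution() itself still reads triangle.txt; this only illustrates the update rule):
tiny = [[3], [7, 4], [2, 4, 6]]
for row in range(1, len(tiny)):
    for col in range(len(tiny[row])):
        left = tiny[row - 1][col] if col != len(tiny[row - 1]) else 0
        diag = tiny[row - 1][col - 1] if col > 0 else 0
        tiny[row][col] += max(left, diag)
assert max(tiny[-1]) == 14  # best path is 3 -> 7 -> 4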
| 355
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : List[Any] = ['pixel_values']
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PIL.Image.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case_ : int = get_size_dict(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224}
snake_case_ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="crop_size" )
snake_case_ : str = do_resize
snake_case_ : Tuple = size
snake_case_ : Tuple = resample
snake_case_ : Dict = do_center_crop
snake_case_ : Any = crop_size
snake_case_ : int = do_rescale
snake_case_ : Union[str, Any] = rescale_factor
snake_case_ : Optional[int] = do_normalize
snake_case_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PIL.Image.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
snake_case_ : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
_SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
snake_case_ : str = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
snake_case_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
snake_case_ : Tuple = resample if resample is not None else self.resample
snake_case_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : Optional[int] = image_std if image_std is not None else self.image_std
snake_case_ : Optional[Any] = size if size is not None else self.size
snake_case_ : int = get_size_dict(_SCREAMING_SNAKE_CASE )
snake_case_ : str = crop_size if crop_size is not None else self.crop_size
snake_case_ : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="crop_size" )
snake_case_ : int = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
snake_case_ : Optional[int] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
snake_case_ : Optional[Any] = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
snake_case_ : List[Any] = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
snake_case_ : Optional[int] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
snake_case_ : List[str] = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
snake_case_ : int = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
snake_case_ : List[str] = {"pixel_values": images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
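# Hedged numeric sketch of the rescale -> normalize arithmetic applied above; 0.5 is
# the value of the IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD constants imported
# at the top of this file:
scaled = 128 * (1 / 255)            # do_rescale with the default rescale_factor of 1/255
normalized = (scaled - 0.5) / 0.5   # do_normalize, applied per channel
assert abs(normalized - 0.0039215686) < 1e-6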
| 36
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :str = logging.get_logger(__name__)
lowercase__ :Tuple = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : str ='''megatron-bert'''
def __init__( self ,A__=2_9_0_5_6 ,A__=1_0_2_4 ,A__=2_4 ,A__=1_6 ,A__=4_0_9_6 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=2 ,A__=0.02 ,A__=1E-12 ,A__=0 ,A__="absolute" ,A__=True ,**A__ ,):
super().__init__(pad_token_id=A__ ,**A__)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = use_cache
| 101
|
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
    if not isinstance(_lowercase , int ) or _lowercase <= 0:
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
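# For reference: h_n = n * (2n - 1), so hexagonal_numbers(5) evaluates to
# [0, 1, 6, 15, 28] -- the function starts counting from n = 0.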
| 310
| 0
|
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int = 50_000_000 ):
'''simple docstring'''
lowercase__ : List[Any] = set()
lowercase__ : Any = int((limit - 24) ** (1 / 2) )
lowercase__ : Any = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE_ ) ) )
for primea in primes:
lowercase__ : Any = primea * primea
for primeb in primes:
lowercase__ : Optional[int] = primeb * primeb * primeb
if square + cube >= limit - 16:
break
for primec in primes:
lowercase__ : Dict = primec * primec * primec * primec
lowercase__ : List[Any] = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(F'''{solution() = }''')
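# Hedged sanity note on the early-exit margins above: the smallest prime fourth power
# is 2**4 = 16 and the smallest cube + fourth-power sum is 2**3 + 2**4 = 24, which is
# why the loops bail out at limit - 16 and the prime bound is derived from limit - 24.
# The smallest expressible number overall is:
assert 2**2 + 2**3 + 2**4 == 28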
| 216
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case_ = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
snake_case_ = {
'''gpt2''': 1_024,
'''gpt2-medium''': 1_024,
'''gpt2-large''': 1_024,
'''gpt2-xl''': 1_024,
'''distilgpt2''': 1_024,
}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : List[str] = VOCAB_FILES_NAMES
__lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : int = ["""input_ids""", """attention_mask"""]
__lowerCamelCase : str = GPTaTokenizer
def __init__( self , a=None , a=None , a=None , a="<|endoftext|>" , a="<|endoftext|>" , a="<|endoftext|>" , a=False , **a , ):
super().__init__(
a , a , tokenizer_file=a , unk_token=a , bos_token=a , eos_token=a , add_prefix_space=a , **a , )
lowercase__ : int = kwargs.pop('add_bos_token' , a)
lowercase__ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , a) != add_prefix_space:
lowercase__ : Optional[Any] = getattr(a , pre_tok_state.pop('type'))
lowercase__ : List[Any] = add_prefix_space
lowercase__ : str = pre_tok_class(**a)
lowercase__ : Tuple = add_prefix_space
def snake_case_ ( self , *a , **a):
lowercase__ : Tuple = kwargs.get('is_split_into_words' , a)
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a , **a)
def snake_case_ ( self , *a , **a):
lowercase__ : Optional[Any] = kwargs.get('is_split_into_words' , a)
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a , **a)
def snake_case_ ( self , a , a = None):
lowercase__ : Any = self._tokenizer.model.save(a , name=a)
return tuple(a)
def snake_case_ ( self , a):
lowercase__ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
lowercase__ : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
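# Hedged usage note for the add_prefix_space plumbing above ("gpt2" is the standard
# hub checkpoint id; GPT2TokenizerFast is the upstream name of the class defined here):
#
#   tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)   # accepted
#
# Without add_prefix_space=True the assertions in _batch_encode_plus/_encode_plus
# reject pretokenized input, because byte-level BPE encodes a leading space as part
# of the first token and cannot add it back after the words are already split.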
| 216
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Optional[Any] = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( A__ , A__ ):
_a : int= "swin"
_a : Tuple= {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self ,snake_case=224 ,snake_case=4 ,snake_case=3 ,snake_case=96 ,snake_case=[2, 2, 6, 2] ,snake_case=[3, 6, 12, 24] ,snake_case=7 ,snake_case=4.0 ,snake_case=True ,snake_case=0.0 ,snake_case=0.0 ,snake_case=0.1 ,snake_case="gelu" ,snake_case=False ,snake_case=0.02 ,snake_case=1e-5 ,snake_case=32 ,snake_case=None ,snake_case=None ,**snake_case ,):
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase : Optional[Any] = image_size
lowercase : List[str] = patch_size
lowercase : Tuple = num_channels
lowercase : List[str] = embed_dim
lowercase : List[str] = depths
lowercase : Tuple = len(_lowerCAmelCase )
lowercase : Any = num_heads
lowercase : Optional[Any] = window_size
lowercase : int = mlp_ratio
lowercase : Optional[Any] = qkv_bias
lowercase : Dict = hidden_dropout_prob
lowercase : Tuple = attention_probs_dropout_prob
lowercase : Optional[int] = drop_path_rate
lowercase : Dict = hidden_act
lowercase : Any = use_absolute_embeddings
lowercase : int = layer_norm_eps
lowercase : Optional[int] = initializer_range
lowercase : List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase : int = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
lowercase : Union[str, Any] = ["""stem"""] + [f"stage{idx}" for idx in range(1 ,len(_lowerCAmelCase ) + 1 )]
lowercase , lowercase : Any = get_aligned_output_features_output_indices(
out_features=_lowerCAmelCase ,out_indices=_lowerCAmelCase ,stage_names=self.stage_names )
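# Note on the hidden_size computation above (a hedged reading, consistent with the
# defaults): embed_dim=96 and depths=[2, 2, 6, 2] give 4 stages, so
# hidden_size = 96 * 2 ** 3 = 768 -- the final channel width of Swin-T, which lets
# VisionEncoderDecoderModel treat this backbone like any other encoder.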
class __snake_case ( A__ ):
_a : List[str]= version.parse("1.11" )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 1e-4
| 20
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : str ):
A = """ylacombe/bark-small"""
A = tempfile.mkdtemp()
A = """en_speaker_1"""
A = """This is a test string"""
A = """speaker_embeddings_path.json"""
A = """speaker_embeddings"""
def A (self : Optional[Any] , **_lowerCAmelCase : Any ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase )
def A (self : Dict ):
shutil.rmtree(self.tmpdirname )
def A (self : Optional[Any] ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
A = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def A (self : int ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def A (self : Union[str, Any] ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
A = 35
A = 2
A = 8
A = {
"""semantic_prompt""": np.ones(_lowerCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
A = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_lowerCAmelCase , **_lowerCAmelCase )
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
A = processor(text=self.input_string , voice_preset=self.voice_preset )
def A (self : str ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
A = processor(text=self.input_string )
A = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 258
| 0
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, "sqlalchemy.sql.Selectable"] , _SCREAMING_SNAKE_CASE: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _SCREAMING_SNAKE_CASE: Optional[Features] = None , _SCREAMING_SNAKE_CASE: str = None , _SCREAMING_SNAKE_CASE: bool = False , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> str:
"""simple docstring"""
super().__init__(features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = Sql(
cache_dir=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , sql=_SCREAMING_SNAKE_CASE , con=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def lowercase ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
self.builder.download_and_prepare(
download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , )
# Build dataset for splits
UpperCamelCase_ = self.builder.as_dataset(
split="train" , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
class _UpperCamelCase :
def __init__( self: Dict , _SCREAMING_SNAKE_CASE: Dataset , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , **_SCREAMING_SNAKE_CASE: Dict , ) -> Optional[Any]:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCamelCase_ = dataset
UpperCamelCase_ = name
UpperCamelCase_ = con
UpperCamelCase_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCamelCase_ = num_proc
UpperCamelCase_ = to_sql_kwargs
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.to_sql_kwargs.pop("sql" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.to_sql_kwargs.pop("con" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.to_sql_kwargs.pop("index" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self._write(index=_SCREAMING_SNAKE_CASE , **self.to_sql_kwargs )
return written
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str ) -> Any:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = args
UpperCamelCase_ = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
UpperCamelCase_ = query_table(
table=self.dataset.data , key=slice(_SCREAMING_SNAKE_CASE , offset + self.batch_size ) , indices=self.dataset._indices , )
UpperCamelCase_ = batch.to_pandas()
UpperCamelCase_ = df.to_sql(self.name , self.con , index=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return num_rows or len(_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
UpperCamelCase_ , UpperCamelCase_ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
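# Hedged end-to-end sketch of the reader/writer pair above, using an in-memory SQLite
# database. Dataset.from_dict / Dataset.from_sql / Dataset.to_sql are the public entry
# points that wrap these classes; the table name "items" is an illustration:
#
#   import sqlite3
#   from datasets import Dataset
#   con = sqlite3.connect(":memory:")
#   Dataset.from_dict({"x": [1, 2, 3]}).to_sql("items", con)
#   round_tripped = Dataset.from_sql("SELECT x FROM items", con)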
| 328
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
for param in module.parameters():
param.requires_grad = False
def lowerCAmelCase_ ( ) -> Dict:
device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
device = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
fig = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(False )
fig.axes.get_yaxis().set_visible(False )
plt.show()
def lowerCAmelCase_ ( ) -> List[str]:
current_time = datetime.now()
timestamp = current_time.strftime("%H:%M:%S" )
return timestamp
| 328
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowercase__ ( snake_case_ :Dict ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _UpperCAmelCase ( lowerCAmelCase__ ):
@staticmethod
def a ( _lowercase : ArgumentParser ):
__UpperCAmelCase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=_UpperCAmelCase , help='''Name of the model to download''' )
download_parser.set_defaults(func=_UpperCAmelCase )
def __init__( self : Optional[Any] , _lowercase : str , _lowercase : str , _lowercase : bool , _lowercase : bool ):
__UpperCAmelCase = model
__UpperCAmelCase = cache
__UpperCAmelCase = force
__UpperCAmelCase = trust_remote_code
def a ( self : int ):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 332
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
__lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ : int = "bigscience/bloom-1b7"
# Constant values
lowerCAmelCase__ : Any = 2.109659552692574
lowerCAmelCase__ : str = "Hello my name is"
lowerCAmelCase__ : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCAmelCase__ : List[Any] = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
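# Hedged sketch (illustrative, not part of the test file above): one way to
# audit which linear layers were swapped out by 8-bit loading, as the asserts
# above check head by head. Assumes bitsandbytes is installed; `Linear8bitLt`
# is the un-obfuscated bnb class name.
def quantized_module_report(model):
    import bitsandbytes as bnb
    report = {}
    for name, module in model.named_modules():
        if isinstance(module, bnb.nn.Linear8bitLt):
            report[name] = "int8"  # quantized by load_in_8bit-style loading
        elif isinstance(module, torch.nn.Linear):
            report[name] = str(module.weight.dtype)  # e.g. lm_head keeps a plain dtype
    return report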
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
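# Hedged sketch (illustrative): `LoRALayer` is referenced above but not defined
# in this snippet. A minimal adapter wrapper with the shape the test expects
# (an indexable two-layer `adapter` over a frozen projection); assumes
# `from torch import nn`, which the grad check above already relies on.
class LoRALayer(nn.Module):
    def __init__(self, module: nn.Linear, rank: int):
        super().__init__()
        self.module = module  # the frozen pretrained projection
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        # near-zero init so training starts from the base model's behavior
        nn.init.normal_(self.adapter[0].weight, std=1 / rank)
        nn.init.zeros_(self.adapter[1].weight)

    def forward(self, x):
        return self.module(x) + self.adapter(x)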
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "gpt2-xl"
lowerCAmelCase__ : str = 3.3191854854152187
| 325
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCAmelCase : Dict = None
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = "▁"
UpperCAmelCase : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : Optional[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
UpperCAmelCase : Any = {
"google/pegasus-xsum": 512,
}
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = PegasusTokenizer
__a = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any] , UpperCamelCase : List[Any]=None , UpperCamelCase : Any=None , UpperCamelCase : Dict="<pad>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : List[Any]="<unk>" , UpperCamelCase : Optional[Any]="<mask_2>" , UpperCamelCase : int="<mask_1>" , UpperCamelCase : str=None , UpperCamelCase : Optional[int]=103 , **UpperCamelCase : List[str] , ):
'''simple docstring'''
__UpperCAmelCase : Any = offset
if additional_special_tokens is not None:
if not isinstance(a_ , a_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(a_ )}, but is'''
f''' {type(a_ )}''' )
__UpperCAmelCase : List[str] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(a_ ) , self.offset - 1 )
]
if len(set(a_ ) ) != len(a_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
__UpperCAmelCase : Union[str, Any] = additional_special_tokens_extended
else:
__UpperCAmelCase : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
a_ , tokenizer_file=a_ , pad_token=a_ , eos_token=a_ , unk_token=a_ , mask_token=a_ , mask_token_sent=a_ , offset=a_ , additional_special_tokens=a_ , **a_ , )
__UpperCAmelCase : Any = vocab_file
__UpperCAmelCase : Dict = False if not self.vocab_file else True
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : int = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List , UpperCamelCase : Optional[List] = None , UpperCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(a_ )
elif token_ids_a is None:
return self._special_token_mask(a_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ ( self : Any , UpperCamelCase : Dict , UpperCamelCase : int=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(a_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
a_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
| 359
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowerCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase : str ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase )
__UpperCAmelCase : int = list(model.children() )[:-2]
__UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase )
__UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) )
__UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 )
__UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
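# Hedged shape check (illustrative, assumed sizes): a resnet-style
# (B, 2048, 7, 7) feature map pooled with POOLING_BREAKDOWN[3] == (3, 1)
# flattens and transposes to (B, 3, 2048), i.e. B x N x 2048.
_features = torch.randn(2, 2048, 7, 7)
_pool = nn.AdaptiveAvgPool2d((3, 1))
_out = torch.flatten(_pool(_features), start_dim=2).transpose(1, 2).contiguous()
assert _out.shape == (2, 3, 2048)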
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )]
__UpperCAmelCase : Any = os.path.dirname(UpperCamelCase )
__UpperCAmelCase : List[str] = tokenizer
__UpperCAmelCase : str = labels
__UpperCAmelCase : Optional[int] = len(UpperCamelCase )
__UpperCAmelCase : int = max_seq_length
__UpperCAmelCase : int = transforms
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1]
__UpperCAmelCase : Any = sentence[: self.max_seq_length]
__UpperCAmelCase : Tuple = torch.zeros(self.n_classes )
__UpperCAmelCase : str = 1
__UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
__UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch]
__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase )
__UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
__UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ):
__UpperCAmelCase : List[str] = input_row["""sentence"""]
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] )
__UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row in batch] )
__UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] )
__UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
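# Hedged sketch (illustrative tensors): the pad-and-mask pattern the collate
# function above relies on, isolated from the Dataset plumbing.
_sentences = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
_lengths = [len(s) for s in _sentences]
_text = torch.zeros(len(_sentences), max(_lengths), dtype=torch.long)
_mask = torch.zeros_like(_text)
for _i, (_row, _n) in enumerate(zip(_sentences, _lengths)):
    _text[_i, :_n] = _row  # left-aligned tokens, zeros pad the right
    _mask[_i, :_n] = 1     # 1 marks real tokens, 0 marks padding
assert _mask.tolist() == [[1, 1, 1], [1, 1, 0]]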
def lowerCamelCase ( ) -> int:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 320
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : int =["""image_processor""", """tokenizer"""]
UpperCamelCase__ : Any ="""BridgeTowerImageProcessor"""
UpperCamelCase__ : List[str] =("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = True , lowerCamelCase__ = None , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
# add pixel_values + pixel_mask
__UpperCamelCase : Tuple =self.image_processor(
lowerCamelCase__ , return_tensors=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_center_crop=lowerCamelCase__ , **lowerCamelCase__ )
encoding.update(lowerCamelCase__ )
return encoding
def __lowercase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def __lowercase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.tokenizer.model_input_names
__UpperCamelCase : Optional[int] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 71
|
def A ( _lowerCamelCase ):
'''simple docstring'''
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
_lowerCAmelCase : List[str] = gray_code_sequence_string(_lowerCamelCase )
    # convert the generated bit strings to integers
for i in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : List[str] = int(sequence[i] , 2 )
return sequence
def A ( _lowerCamelCase ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_lowerCAmelCase : List[Any] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_lowerCAmelCase : Optional[int] = gray_code_sequence_string(bit_count - 1 )
_lowerCAmelCase : str = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_lowerCAmelCase : Dict = "0" + smaller_sequence[i]
sequence.append(_lowerCamelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_lowerCAmelCase : Optional[Any] = "1" + smaller_sequence[i]
sequence.append(_lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
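# Hedged sketch (illustrative names, not the obfuscated ones above): the same
# reflect-and-prefix construction in one function, cross-checked against the
# closed form g(n) = n ^ (n >> 1).
def gray_code_sketch(bit_count: int) -> list[int]:
    sequence = ["0"] if bit_count == 0 else ["0", "1"]
    for _ in range(bit_count - 1):
        # 0-prefix the current list, then 1-prefix its mirror image
        sequence = ["0" + s for s in sequence] + ["1" + s for s in reversed(sequence)]
    return [int(s, 2) for s in sequence]


assert gray_code_sketch(3) == [n ^ (n >> 1) for n in range(8)]  # [0, 1, 3, 2, 6, 7, 5, 4]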
| 36
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ : int = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
SCREAMING_SNAKE_CASE__ : Tuple = dict(zip(_a , range(len(_a ) ) ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
SCREAMING_SNAKE_CASE__ : List[Any] = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_a , _a )
def _a ( self , **_a ) -> int:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **_a )
def _a ( self , **_a ) -> Union[str, Any]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **_a )
def _a ( self , **_a ) -> Tuple:
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Dict = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : int = OwlViTProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
SCREAMING_SNAKE_CASE__ : Dict = OwlViTProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_image_processor(do_normalize=_a )
SCREAMING_SNAKE_CASE__ : Dict = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = OwlViTProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(_a , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(images=_a , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = OwlViTProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = """lower newer"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(text=_a , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_a , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = OwlViTProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : List[str] = """lower newer"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """google/owlvit-base-patch32"""
SCREAMING_SNAKE_CASE__ : str = OwlViTProcessor.from_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Any = ["""cat""", """nasa badge"""]
SCREAMING_SNAKE_CASE__ : List[Any] = processor(text=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """google/owlvit-base-patch32"""
SCREAMING_SNAKE_CASE__ : Dict = OwlViTProcessor.from_pretrained(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = [["""cat""", """nasa badge"""], ["""person"""]]
SCREAMING_SNAKE_CASE__ : Any = processor(text=_a )
SCREAMING_SNAKE_CASE__ : List[str] = 16
SCREAMING_SNAKE_CASE__ : Dict = len(_a )
SCREAMING_SNAKE_CASE__ : Dict = max([len(_a ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = """google/owlvit-base-patch32"""
SCREAMING_SNAKE_CASE__ : str = OwlViTProcessor.from_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""cat""", """nasa badge"""]
SCREAMING_SNAKE_CASE__ : Dict = processor(text=_a )
SCREAMING_SNAKE_CASE__ : Dict = 16
SCREAMING_SNAKE_CASE__ : int = inputs["""input_ids"""]
SCREAMING_SNAKE_CASE__ : str = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Dict = OwlViTProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Dict = processor(images=_a , query_images=_a )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = OwlViTProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ : Any = processor.batch_decode(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
| 56
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a :
'''simple docstring'''
def __init__( self , _a , _a=3 , _a=32 , _a=3 , _a=10 , _a=[10, 20, 30, 40] , _a=[1, 1, 2, 1] , _a=True , _a=True , _a="relu" , _a=3 , _a=None , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : Any = image_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : int = embeddings_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_sizes
SCREAMING_SNAKE_CASE__ : List[Any] = depths
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : str = len(_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> str:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _a ( self , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = RegNetModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = RegNetForImageClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE :Optional[Any] = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :Dict = False
_SCREAMING_SNAKE_CASE :Optional[int] = False
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :Dict = False
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = RegNetModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict = ConfigTester(self , config_class=_a , has_text_modality=_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ) -> Tuple:
"""simple docstring"""
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _a ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _a ( self ) -> Any:
"""simple docstring"""
pass
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(_a )
SCREAMING_SNAKE_CASE__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(_a , _a , _a ):
SCREAMING_SNAKE_CASE__ : Any = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**self._prepare_for_class(_a , _a ) )
SCREAMING_SNAKE_CASE__ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE__ : Tuple = layer_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Dict = True
check_hidden_states_output(_a , _a , _a )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def _a ( self ) -> List[str]:
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[int] = RegNetModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _lowercase ( ) -> Dict:
SCREAMING_SNAKE_CASE__ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a (unittest.TestCase):
'''simple docstring'''
@cached_property
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : int = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**_a )
# verify the logits
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
| 56
| 1
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCamelCase__ ( __lowercase ):
def __init__(self : int , snake_case_ : Callable , snake_case_ : Optional[Features] = None , snake_case_ : str = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[dict] = None , snake_case_ : Optional[int] = None , **snake_case_ : Optional[Any] , ):
super().__init__(
features=snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ , streaming=snake_case_ , num_proc=snake_case_ , **snake_case_ , )
__a : str = Generator(
cache_dir=snake_case_ , features=snake_case_ , generator=snake_case_ , gen_kwargs=snake_case_ , **snake_case_ , )
def lowerCAmelCase (self : Union[str, Any] ):
# Build iterable dataset
if self.streaming:
__a : Tuple = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
__a : Optional[int] = None
__a : Any = None
__a : List[str] = None
__a : Any = None
self.builder.download_and_prepare(
download_config=snake_case_ , download_mode=snake_case_ , verification_mode=snake_case_ , base_path=snake_case_ , num_proc=self.num_proc , )
__a : Optional[int] = self.builder.as_dataset(
split='''train''' , verification_mode=snake_case_ , in_memory=self.keep_in_memory )
return dataset
| 216
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowercase__ =['text', 'image', 'audio']
def __UpperCamelCase ( lowerCAmelCase__ : List[str] ):
__a : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
inputs.append(create_inputs(lowerCAmelCase__ ) )
else:
raise ValueError(f"Invalid type requested: {input_type}" )
return inputs
def __UpperCamelCase ( lowerCAmelCase__ : List ):
__a : List[str] = []
for output in outputs:
if isinstance(lowerCAmelCase__ , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(lowerCAmelCase__ , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(lowerCAmelCase__ , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(f"Invalid output: {output}" )
return output_types
@is_tool_test
class UpperCamelCase__ :
def lowerCAmelCase (self : Any ):
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
__a : Any = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
__a : Optional[int] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase (self : List[Any] ):
__a : Union[str, Any] = create_inputs(self.tool.inputs )
__a : List[Any] = self.tool(*snake_case_ )
# There is a single output
if len(self.tool.outputs ) == 1:
__a : Tuple = [outputs]
self.assertListEqual(output_types(snake_case_ ) , self.tool.outputs )
def lowerCAmelCase (self : List[Any] ):
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def lowerCAmelCase (self : Any ):
__a : Any = create_inputs(self.tool.inputs )
__a : Union[str, Any] = self.tool(*snake_case_ )
if not isinstance(snake_case_ , snake_case_ ):
__a : Tuple = [outputs]
self.assertEqual(len(snake_case_ ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case_ , self.tool.outputs ):
__a : List[Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case_ , snake_case_ ) )
def lowerCAmelCase (self : Optional[int] ):
__a : Any = create_inputs(self.tool.inputs )
__a : Dict = []
for _input, input_type in zip(snake_case_ , self.tool.inputs ):
if isinstance(snake_case_ , snake_case_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
__a : Optional[Any] = self.tool(*snake_case_ )
if not isinstance(snake_case_ , snake_case_ ):
__a : Dict = [outputs]
self.assertEqual(len(snake_case_ ) , len(self.tool.outputs ) )
| 216
| 1
|
from itertools import count
def _a ( UpperCAmelCase = 50 ) -> int:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = [1] * min_block_length
for n in count(UpperCAmelCase ):
fill_count_functions.append(1 )
for block_length in range(UpperCAmelCase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1000000:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
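# Hedged sanity check (illustrative): the same quantity via a direct memoized
# recurrence. Project Euler 114 states that a 7-unit row with blocks of length
# >= 3 can be filled in exactly 17 ways.
from functools import lru_cache


def fill_count_sketch(n: int, min_block: int) -> int:
    @lru_cache(maxsize=None)
    def ways(length: int) -> int:
        total = 1  # leave every remaining cell empty
        for start in range(length):
            for block in range(min_block, length - start + 1):
                # place a block at `start`, skip one separator cell, recurse
                total += ways(length - start - block - 1)
        return total

    return ways(n)


assert fill_count_sketch(7, 3) == 17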
| 368
|
from math import factorial
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , A : Dict , A : Any ) ->Optional[Any]:
lowerCamelCase__ : Tuple = real
if isinstance(A , A ):
lowerCamelCase__ : Optional[int] = [1] * rank
else:
lowerCamelCase__ : List[Any] = rank
def __repr__( self : Tuple ) ->str:
return (
F"{self.real}+"
F"{'+'.join(str(A )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
)
def __lowerCamelCase ( self : List[Any] ) ->List[Any]:
lowerCamelCase__ : Tuple = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , A )
def __add__( self : Union[str, Any] , A : int ) ->str:
if not isinstance(A , A ):
return Dual(self.real + other , self.duals )
lowerCamelCase__ : int = self.duals.copy()
lowerCamelCase__ : int = other.duals.copy()
if len(A ) > len(A ):
o_dual.extend([1] * (len(A ) - len(A )) )
elif len(A ) < len(A ):
s_dual.extend([1] * (len(A ) - len(A )) )
lowerCamelCase__ : Optional[Any] = []
for i in range(len(A ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , A )
_UpperCAmelCase : List[Any] = __add__
def __sub__( self : Any , A : Dict ) ->int:
return self + other * -1
def __mul__( self : Optional[Any] , A : List[Any] ) ->Union[str, Any]:
if not isinstance(A , A ):
lowerCamelCase__ : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , A )
lowerCamelCase__ : Tuple = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , A )
_UpperCAmelCase : Optional[Any] = __mul__
def __truediv__( self : int , A : List[Any] ) ->Dict:
if not isinstance(A , A ):
lowerCamelCase__ : Tuple = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , A )
raise ValueError
def __floordiv__( self : Dict , A : Union[str, Any] ) ->Union[str, Any]:
if not isinstance(A , A ):
lowerCamelCase__ : Tuple = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , A )
raise ValueError
    def __pow__( self : Any , n : List[Any] ) ->Tuple:
        if n < 0 or isinstance(n , float ):
            raise ValueError('''power must be a non-negative integer''' )
if n == 0:
return 1
if n == 1:
return self
lowerCamelCase__ : Union[str, Any] = self
for _ in range(n - 1 ):
x *= self
return x
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
if not callable(UpperCAmelCase ):
raise ValueError('''differentiate() requires a function as input for func''' )
if not isinstance(UpperCAmelCase , (float, int) ):
raise ValueError('''differentiate() requires a float as input for position''' )
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError('''differentiate() requires an int as input for order''' )
lowerCamelCase__ : List[str] = Dual(UpperCAmelCase , 1 )
lowerCamelCase__ : Any = func(UpperCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def f( UpperCAmelCase ) -> int:
    """simple docstring"""
    return UpperCAmelCase**2 * UpperCAmelCase**4
print(_a(f, 9, 2))
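# Hedged sketch (illustrative): the first-order special case of the same idea,
# with a single dual coefficient and eps**2 == 0.
class SimpleDual:
    def __init__(self, real: float, eps: float):
        self.real, self.eps = real, eps

    def __add__(self, other: "SimpleDual") -> "SimpleDual":
        return SimpleDual(self.real + other.real, self.eps + other.eps)

    def __mul__(self, other: "SimpleDual") -> "SimpleDual":
        # (a + b*eps)(c + d*eps) = ac + (ad + bc)*eps, since eps**2 == 0
        return SimpleDual(self.real * other.real,
                          self.real * other.eps + self.eps * other.real)


_x = SimpleDual(2.0, 1.0)  # seed d(x)/dx = 1 at x = 2
_y = _x * _x * _x          # y = x**3, so y' = 3 * x**2
assert (_y.real, _y.eps) == (8.0, 12.0)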
| 265
| 0
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ , )-> Optional[int]:
'''simple docstring'''
super().__init__(features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Sql(
cache_dir=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , sql=SCREAMING_SNAKE_CASE_ , con=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE_ , download_mode=SCREAMING_SNAKE_CASE_ , verification_mode=SCREAMING_SNAKE_CASE_ , base_path=SCREAMING_SNAKE_CASE_ , )
# Build dataset for splits
__UpperCamelCase = self.builder.as_dataset(
split='''train''' , verification_mode=SCREAMING_SNAKE_CASE_ , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> List[str]:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
__UpperCamelCase = dataset
__UpperCamelCase = name
__UpperCamelCase = con
__UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCamelCase = num_proc
__UpperCamelCase = to_sql_kwargs
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = self.to_sql_kwargs.pop('''sql''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.to_sql_kwargs.pop('''con''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.to_sql_kwargs.pop('''index''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self._write(index=SCREAMING_SNAKE_CASE_ , **self.to_sql_kwargs )
return written
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args
__UpperCamelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__UpperCamelCase = query_table(
table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCamelCase = batch.to_pandas()
__UpperCamelCase = df.to_sql(self.name , self.con , index=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return num_rows or len(SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> int:
'''simple docstring'''
__UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
| 328
|
def A_ ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowercase__ : List[str] = generate_large_matrix()
lowercase__ : Tuple = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A_ ( snake_case : list[list[int]] ) -> None:
'''simple docstring'''
assert all(row == sorted(snake_case , reverse=snake_case ) for row in grid )
assert all(list(snake_case ) == sorted(snake_case , reverse=snake_case ) for col in zip(*snake_case ) )
def A_ ( snake_case : list[int] ) -> int:
'''simple docstring'''
__UpperCamelCase = 0
__UpperCamelCase = len(snake_case ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCamelCase = (left + right) // 2
__UpperCamelCase = array[mid]
        # num is negative and the element before it is non-negative: mid is the first negative index.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCamelCase = mid + 1
else:
__UpperCamelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(snake_case )
def A_ ( snake_case : list[list[int]] ) -> int:
'''simple docstring'''
__UpperCamelCase = 0
__UpperCamelCase = len(grid[0] )
for i in range(len(snake_case ) ):
__UpperCamelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(snake_case ) * len(grid[0] )) - total
def A_ ( snake_case : list[list[int]] ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def A_ ( snake_case : list[list[int]] ) -> int:
'''simple docstring'''
__UpperCamelCase = 0
for row in grid:
for i, number in enumerate(snake_case ):
if number < 0:
total += len(snake_case ) - i
break
return total
def A_ ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCamelCase = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCamelCase = timeit(f"{func}(grid=grid)" , setup=snake_case , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
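# Hedged sketch (illustrative; the obfuscated definitions above all share the
# name `A_`): the row-level binary search re-implemented standalone and
# checked against a direct count.
def first_negative_index_sketch(row: list[int]) -> int:
    """Index of the first negative in a non-increasing row (== count of non-negatives)."""
    left, right = 0, len(row)
    while left < right:
        mid = (left + right) // 2
        if row[mid] < 0:
            right = mid      # first negative is at mid or earlier
        else:
            left = mid + 1   # everything up to mid is non-negative
    return left


_row = [7, 3, 0, -1, -4]
assert first_negative_index_sketch(_row) == 3
assert len(_row) - first_negative_index_sketch(_row) == sum(1 for x in _row if x < 0)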
| 328
| 1
|
'''simple docstring'''
import os
lowerCamelCase_ = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def __lowercase ( __lowercase ) -> int:
'''simple docstring'''
_A = 0
_A = 0
while index < len(__lowercase ) - 1:
_A = SYMBOLS[numerals[index]]
_A = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
_A = ""
_A = num // 1000
numerals += m_count * "M"
num %= 1000
_A = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_A = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __lowercase ( __lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_A = 0
with open(os.path.dirname(__lowercase ) + roman_numerals_filename ) as filea:
_A = filea.readlines()
for line in lines:
_A = line.strip()
_A = parse_roman_numerals(__lowercase )
_A = generate_roman_numerals(__lowercase )
savings += len(__lowercase ) - len(__lowercase )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''levit'''
def __init__( self : str , __UpperCAmelCase : int=224 , __UpperCAmelCase : Optional[Any]=3 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Any=1 , __UpperCAmelCase : int=16 , __UpperCAmelCase : Any=[128, 256, 384] , __UpperCAmelCase : Optional[Any]=[4, 8, 12] , __UpperCAmelCase : Dict=[4, 4, 4] , __UpperCAmelCase : Union[str, Any]=[16, 16, 16] , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : str=[2, 2, 2] , __UpperCAmelCase : Optional[Any]=[2, 2, 2] , __UpperCAmelCase : int=0.02 , **__UpperCAmelCase : Dict , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
_A = image_size
_A = num_channels
_A = kernel_size
_A = stride
_A = padding
_A = hidden_sizes
_A = num_attention_heads
_A = depths
_A = key_dim
_A = drop_path_rate
_A = patch_size
_A = attention_ratio
_A = mlp_ratio
_A = initializer_range
_A = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = version.parse('''1.11''' )
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return 1E-4
| 174
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__SCREAMING_SNAKE_CASE : Optional[int] = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 31
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
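
# --- Hedged usage sketch (not part of the original module; requires Pillow) ---
# preprocess() above runs: convert-to-RGB -> shortest-edge resize -> center
# crop -> rescale -> normalize -> channels-first. For a 640x480 input the
# output is a single (1, 3, 224, 224) array:
#
# from PIL import Image
# processor = CLIPImageProcessor()
# batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
# assert batch["pixel_values"].shape == (1, 3, 224, 224)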
| 320
| 0
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
@slow
@require_bnb
def __magic_name__ ( self : str ) -> Any:
from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
def __magic_name__ ( self : Any ) -> Any:
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()

        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
def __magic_name__ ( self : List[Any] ) -> Any:
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __magic_name__ ( self : List[str] ) -> int:
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@require_cuda
def __magic_name__ ( self : Union[str, Any] ) -> Any:
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
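
# --- Hedged usage sketch (not part of the original test file) ---
# A minimal training loop over the components built by create_components()
# once Accelerator.prepare has wrapped them. The synthetic input shape
# matches the Linear(2, 4) model; OneCycleLR above allows only 2 steps.
def _example_training_loop():
    model, optimizer, scheduler, train_dl, _ = create_components()
    accelerator = Accelerator()
    model, optimizer, scheduler, train_dl = accelerator.prepare(model, optimizer, scheduler, train_dl)
    for step, _batch in enumerate(train_dl):
        if step == 2:  # stay within the scheduler's configured step budget
            break
        optimizer.zero_grad()
        loss = model(torch.randn(8, 2)).sum()  # synthetic batch for illustration
        accelerator.backward(loss)
        optimizer.step()
        scheduler.step()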
| 222
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""pixel_values"""]
def __init__( self : List[str] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 2_55 , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : List[Any] , ) -> None:
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =size if size is not None else {'''shortest_edge''': 2_56}
SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , default_to_square=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
SCREAMING_SNAKE_CASE__ : Tuple =get_size_dict(__lowercase , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE__ : int =do_resize
SCREAMING_SNAKE_CASE__ : Dict =size
SCREAMING_SNAKE_CASE__ : List[str] =resample
SCREAMING_SNAKE_CASE__ : List[Any] =do_center_crop
SCREAMING_SNAKE_CASE__ : str =crop_size
SCREAMING_SNAKE_CASE__ : List[str] =do_rescale
SCREAMING_SNAKE_CASE__ : Optional[Any] =rescale_factor
SCREAMING_SNAKE_CASE__ : List[str] =do_normalize
SCREAMING_SNAKE_CASE__ : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Optional[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =get_resize_output_image_size(__lowercase , size=size['''shortest_edge'''] , default_to_square=__lowercase )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : int , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] =get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Tuple ) -> np.ndarray:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Dict , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : List[Any] , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : int , ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[Any] =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : int =size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Optional[int] =get_size_dict(__lowercase , default_to_square=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : List[str] =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : int =get_size_dict(__lowercase , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : List[str] =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Any =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : str =make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Dict =[to_numpy_array(__lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] =[self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[Any] =[self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Tuple =[self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str =[to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : List[Tuple] = None ) -> Any:
SCREAMING_SNAKE_CASE__ : Optional[int] =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowercase ) != len(__lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__lowercase ):
SCREAMING_SNAKE_CASE__ : int =target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : List[Any] =[]
for idx in range(len(__lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any =logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Optional[int] =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
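
# --- Illustrative sketch (not part of the original file) ---
# The post-processing above reduces per-class logits to a label map with an
# argmax over the class dimension; a minimal numpy equivalent for one image.
def _argmax_segmentation_demo():
    logits = np.random.randn(1, 21, 128, 128)    # (batch, num_classes, H, W)
    segmentation_map = logits.argmax(axis=1)[0]  # (H, W) of class indices
    assert segmentation_map.shape == (128, 128)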
| 222
| 1
|
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
# Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
if mask < 0.5:
continue
            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(__UpperCAmelCase )
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray] = None, chain_index: Optional[np.ndarray] = None, remark: Optional[str] = None, parents: Optional[Sequence[str]] = None, parents_chain_index: Optional[Sequence[int]] = None) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
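
# --- Illustrative sketch (not part of the original module; dummy values) ---
# ATOM records emitted by to_pdb() are fixed-width, 80-column lines; this
# reproduces the same f-string layout on made-up values to show the widths.
def _pdb_atom_line_demo() -> str:
    atom_line = (
        f"{'ATOM':<6}{1:>5} {' CA ':<4}{'':>1}"
        f"{'ALA':>3} {'A':>1}"
        f"{1:>4}{'':>1}   "
        f"{11.104:>8.3f}{6.134:>8.3f}{1.711:>8.3f}"
        f"{1.00:>6.2f}{0.00:>6.2f}          "
        f"{'C':>2}{'':>2}"
    )
    assert len(atom_line) == 80
    return atom_line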
| 56
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
a : List[Any] = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
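
# --- Hedged usage note (not part of the original script; the script name and
# paths below are placeholders) ---
# Example invocation of the conversion entry point defined above:
#
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted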
| 56
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
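
# --- Illustrative sketch (not part of the original module) ---
# depth_multiplier scales every layer's channel count, floored at min_depth.
# A simplified version of the usual TF "make divisible" rounding rule
# (assumption: the real model additionally re-adds the divisor when rounding
# would lose more than 10% of the width, which this sketch omits):
def _apply_depth_multiplier(channels: int, depth_multiplier: float, min_depth: int = 8) -> int:
    return max(min_depth, int(channels * depth_multiplier + 4) // 8 * 8)


assert _apply_depth_multiplier(64, 0.75) == 48
assert _apply_depth_multiplier(32, 0.25) == 8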
| 49
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Iterate through each branch of the state space tree with DFS and print
    a permutation once the end of the sequence is reached."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
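
# --- Illustrative cross-check (not part of the original script) ---
# create_state_space_tree() visits every leaf of an n!-leaf state-space tree;
# the standard library enumerates the same set of orderings.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! leaves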
| 49
| 1
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
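
# --- Illustrative sketch (not part of the original file) ---
# The symmetric padding above grows each spatial side to the next multiple
# of `size`. Note that a side already divisible by `size` still gains a full
# extra block, matching the (old // size + 1) * size arithmetic in pad():
def _pad_amount_demo(old: int, size: int = 8) -> int:
    return (old // size + 1) * size - old


assert _pad_amount_demo(13) == 3  # 13 -> 16
assert _pad_amount_demo(16) == 8  # 16 -> 24, even though 16 % 8 == 0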
| 347
|
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
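
# --- Illustrative note (not part of the original test file) ---
# Each operation tuple above is (callable, *args) built from the operator
# module, so the same call applies uniformly to HashMap and dict. Minimal
# demonstration against a plain dict:
def _operator_tuple_demo():
    fun, *args = _set("key_a", "val_a")  # (setitem, "key_a", "val_a")
    reference: dict = {}
    fun(reference, *args)  # same effect as reference["key_a"] = "val_a"
    assert reference == {"key_a": "val_a"}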
| 265
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class __lowerCamelCase :
lowerCamelCase_ : Any = MBartConfig
lowerCamelCase_ : Optional[Any] = {}
lowerCamelCase_ : str = 'gelu'
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=20 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , ) -> Optional[int]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
def lowerCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ = prepare_mbart_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = TFMBartModel(config=lowerCamelCase ).get_decoder()
snake_case_ = inputs_dict["""input_ids"""]
snake_case_ = input_ids[:1, :]
snake_case_ = inputs_dict["""attention_mask"""][:1, :]
snake_case_ = inputs_dict["""head_mask"""]
snake_case_ = 1
# first forward pass
snake_case_ = model(lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , use_cache=lowerCamelCase )
snake_case_ , snake_case_ = outputs.to_tuple()
snake_case_ = past_key_values[1]
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> Union[str, Any]:
'''simple docstring'''
if attention_mask is None:
snake_case_ = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowerCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
lowerCamelCase_ : List[Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowerCamelCase_ : Optional[int] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase_ : Optional[int] = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase_ : List[str] = True
lowerCamelCase_ : Dict = False
lowerCamelCase_ : int = False
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def lowerCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
snake_case_ = TFMBartModelTester(self )
snake_case_ = ConfigTester(self , config_class=lowerCamelCase )
def lowerCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
lowerCamelCase_ : int = [
' UN Chief Says There Is No Military Solution in Syria',
]
lowerCamelCase_ : Optional[int] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
lowerCamelCase_ : Optional[int] = 'facebook/mbart-large-en-ro'
@cached_property
def lowerCAmelCase_ ( self ) -> Any:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase_ ( self ) -> Any:
"""simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
def lowerCAmelCase_ ( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = self.translate_src_text(**lowerCamelCase )
self.assertListEqual(self.expected_text , lowerCamelCase )
def lowerCAmelCase_ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ = self.tokenizer(self.src_text , **lowerCamelCase , return_tensors="""tf""" )
snake_case_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
snake_case_ = self.tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
return generated_words
@slow
def lowerCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
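
# --- Hedged usage sketch (not part of the original tests; downloads weights) ---
# The integration test above boils down to this beam-search translation flow:
#
# tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
# model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
# batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
# generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
# print(tokenizer.batch_decode(generated, skip_special_tokens=True))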
| 352
|
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
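
# --- Hedged usage sketch (not part of the original script; needs network
# access, and the live page layout may have changed since it was written) ---
# movies = get_imdb_top_250_movies()
# print(len(movies))  # up to 250 entries
# title, rating = next(iter(movies.items()))
# print(f"{title}: {rating}")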
| 34
| 0
|
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor, a language-model tokenizer and a Q-Former tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
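# A hedged usage sketch for the processor above (the checkpoint name is an
# assumption; any InstructBLIP checkpoint with a saved processor should work):
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#     # `inputs` carries pixel_values, input_ids/attention_mask for the language
#     # model, and qformer_input_ids/qformer_attention_mask for the Q-Former.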
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
def reverse_words(input_str: str) -> str:
    """
    Reverse the order of the words in a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # split off a trailing comma that directly follows a digit, e.g. "9,"
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
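# A minimal usage sketch for the tokenizer above (assumes the `sentencepiece`
# package is installed and the Hub vocab file is reachable):
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     tokens = tokenizer.tokenize("Hello, world!")
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     assert tokenizer.convert_ids_to_tokens(ids) == tokens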
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in the number num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
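# Worked example: factorial(10) = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) == 27. With the default argument, solution(100) returns 648.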
import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # each stage halves (or quarters) the spatial resolution according to its patch settings
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # rebuild the backend normalizer if its saved state disagrees with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
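# A minimal usage sketch (requires network access to the Hub):
#
#     tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#     enc = tok("What is on the table?", "a photo caption")
#     # enc["token_type_ids"] marks the first segment with 0s and the second with
#     # 1s, matching create_token_type_ids_from_sequences above.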
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size)

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device)
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
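# A minimal usage sketch for the config above (values shown are just the
# defaults; whether a VanModel class is importable depends on the package):
#
#     config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
#     # model = VanModel(config)  # if the modeling class is available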
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
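# The rule these tests exercise, informally: a repo is safetensors-compatible
# when every (variant-matched) PyTorch weight file has a safetensors
# counterpart in the same subfolder. A minimal illustration (not from the
# source file):
#
#     is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])   # False
#     is_safetensors_compatible(["unet/diffusion_pytorch_model.bin",
#                                "unet/diffusion_pytorch_model.safetensors"])  # True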
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences)
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
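# A hedged usage sketch: a test can consume the fixture above and point
# `datasets.load_dataset` at the written script directory (the test function
# name is illustrative, not from the source):
#
#     def test_load_dummy_dataset(dataset_loading_script_dir):
#         from datasets import load_dataset
#         ds = load_dataset(dataset_loading_script_dir)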
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( __a):
"""simple docstring"""
UpperCamelCase__ = (DEISMultistepScheduler,)
UpperCamelCase__ = (("""num_inference_steps""", 25),)
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**UpperCAmelCase )
return config
def UpperCamelCase ( self , UpperCAmelCase=0 , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**UpperCAmelCase )
_UpperCAmelCase = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self , UpperCAmelCase=0 , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self , UpperCAmelCase=None , **UpperCAmelCase ):
"""simple docstring"""
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**UpperCAmelCase )
_UpperCAmelCase = scheduler_class(**UpperCAmelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**UpperCAmelCase )
_UpperCAmelCase = scheduler_class(**UpperCAmelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**UpperCAmelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase , 'set_timesteps' ):
scheduler.set_timesteps(UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase , 'set_timesteps' ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
_UpperCAmelCase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=UpperCAmelCase )
_UpperCAmelCase = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=UpperCAmelCase )
_UpperCAmelCase = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type='deis',
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
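
# Usage sketch (illustrative, not part of the test class above): the `from_config`
# round-trip exercised by `test_switch` also works outside the test harness.
# Assumes the public diffusers scheduler API; the zero "model output" is a stand-in.
if __name__ == "__main__":
    import torch
    from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler

    deis = DEISMultistepScheduler(num_train_timesteps=1000)
    unipc = UniPCMultistepScheduler.from_config(deis.config)  # same config, different solver

    sample = torch.randn(1, 3, 8, 8)
    unipc.set_timesteps(10)
    for t in unipc.timesteps:
        model_output = torch.zeros_like(sample)  # a real pipeline would call a denoising model here
        sample = unipc.step(model_output, t, sample).prev_sample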
| 39
|
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative int found under any key in `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
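
# Usage sketch (illustrative): the environment-variable names below are made up.
if __name__ == "__main__":
    os.environ["MYAPP_DEBUG"] = "yes"
    print(parse_flag_from_env("MYAPP_DEBUG"))       # True
    print(get_int_from_env(["MYAPP_WORKERS"], 4))   # 4 (key not set)
    print(parse_choice_from_env("MYAPP_MODE"))      # 'no' (default)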
| 34
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
def _UpperCAmelCase ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> int:
return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
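
# Postprocessing sketch (illustrative, not part of the original tests): the tests
# above only check raw logits; a per-pixel label map is the argmax over the label
# dimension. Self-contained with a dummy logits tensor so no checkpoint is needed.
if __name__ == "__main__":
    dummy_logits = torch.randn(1, 150, 512, 512)  # (batch, num_labels, height, width)
    segmentation_map = dummy_logits.argmax(dim=1)[0]  # (height, width) class indices
    print(segmentation_map.shape)  # torch.Size([512, 512])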
| 357
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points given as coordinate iterables."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
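    # Cross-check (illustrative, not in the original script): scikit-learn's own
    # KNN classifier should agree with the hand-rolled one on this query point.
    from sklearn.neighbors import KNeighborsClassifier

    sk_knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
    print(classes[sk_knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])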
| 153
| 0
|
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its 1-based column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
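    # Worked examples (illustrative): 1-based Excel column numbers.
    for title in ["A", "Z", "AA", "AZ"]:
        print(title, "->", excel_title_to_column(title))  # 1, 26, 27, 52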
| 285
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            '的',
            '价',
            '格',
            '是',
            '15',
            '便',
            'alex',
            '##andra',
            ',',
            '。',
            '-',
            't',
            'shirt',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        image_processor_map = {
            'do_resize': True,
            'size': {'height': 224, 'width': 224},
            'do_center_crop': True,
            'crop_size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
            'do_convert_rgb': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)', sep_token='(SEP)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token='(CLS)', sep_token='(SEP)', do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 285
| 1
|
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the first `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
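
    # Cross-check (illustrative): the truncated series should match the math
    # module's sin/cos to within floating-point noise for these inputs.
    from math import cos, sin

    print(abs(maclaurin_sin(10) - sin(10)))      # ~0.0
    print(abs(maclaurin_cos(10, 15) - cos(10)))  # ~0.0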
| 363
|
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each choice's string representation back to the choice itself."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = F'--{field.name}'
_snake_case = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowerCAmelCase_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
_snake_case = kwargs.pop('aliases' , [] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = [aliases]
_snake_case = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(lowerCAmelCase_ , 'UnionType' ) and isinstance(lowerCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(lowerCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
_snake_case = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_snake_case = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_snake_case = (
field.type.__args__[0] if isinstance(lowerCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
_snake_case = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_snake_case = {}
if origin_type is Literal or (isinstance(field.type , lowerCAmelCase_ ) and issubclass(field.type , lowerCAmelCase_ )):
if origin_type is Literal:
_snake_case = field.type.__args__
else:
_snake_case = [x.value for x in field.type]
_snake_case = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
_snake_case = field.default
else:
_snake_case = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_snake_case = copy(lowerCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
_snake_case = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_snake_case = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_snake_case = default
# This tells argparse we accept 0 or 1 value after --field_name
_snake_case = '?'
# This is the value that will get picked if we do --field_name (without value)
_snake_case = True
elif isclass(lowerCAmelCase_ ) and issubclass(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = field.type.__args__[0]
_snake_case = '+'
if field.default_factory is not dataclasses.MISSING:
_snake_case = field.default_factory()
elif field.default is dataclasses.MISSING:
_snake_case = True
else:
_snake_case = field.type
if field.default is not dataclasses.MISSING:
_snake_case = field.default
elif field.default_factory is not dataclasses.MISSING:
_snake_case = field.default_factory()
else:
_snake_case = True
parser.add_argument(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_snake_case = False
parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if hasattr(lowerCAmelCase_ , '_argument_group_name' ):
_snake_case = self.add_argument_group(dtype._argument_group_name )
else:
_snake_case = self
try:
_snake_case = get_type_hints(lowerCAmelCase_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCAmelCase_ ):
_snake_case = '.'.join(map(lowerCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(lowerCAmelCase_ ):
if not field.init:
continue
_snake_case = type_hints[field.name]
self._parse_dataclass_field(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_snake_case = []
if args_filename:
args_files.append(Path(lowerCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_snake_case = ArgumentParser()
args_file_parser.add_argument(lowerCAmelCase_ , type=lowerCAmelCase_ , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
_snake_case , _snake_case = args_file_parser.parse_known_args(args=lowerCAmelCase_ )
_snake_case = vars(lowerCAmelCase_ ).get(args_file_flag.lstrip('-' ) , lowerCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(lowerCAmelCase_ ) for p in cmd_args_file_paths] )
_snake_case = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_snake_case = file_args + args if args is not None else file_args + sys.argv[1:]
_snake_case , _snake_case = self.parse_known_args(args=lowerCAmelCase_ )
_snake_case = []
for dtype in self.dataclass_types:
_snake_case = {f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init}
_snake_case = {k: v for k, v in vars(lowerCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = dtype(**lowerCAmelCase_ )
outputs.append(lowerCAmelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
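
# Usage sketch (illustrative, not part of the original module): `ExampleConfig` is a
# made-up dataclass. The aliased import assumes the upstream `transformers` package,
# whose HfArgumentParser exposes the same `parse_dict` API reconstructed above.
if __name__ == "__main__":
    from transformers import HfArgumentParser as UpstreamHfArgumentParser

    @dataclasses.dataclass
    class ExampleConfig:
        learning_rate: float = 3e-4
        epochs: int = 3

    parser = UpstreamHfArgumentParser(ExampleConfig)
    (config,) = parser.parse_dict({"learning_rate": 1e-4, "epochs": 10})
    print(config)  # ExampleConfig(learning_rate=0.0001, epochs=10)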
| 160
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the concrete class name did not survive this dump; "SimpleImageProcessor"
    # is a placeholder for a standard shortest-edge-resize + center-crop processor.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
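
# Usage sketch (illustrative): run the processor above on a random image.
# `SimpleImageProcessor` is the placeholder class name introduced above.
if __name__ == "__main__":
    dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    processor = SimpleImageProcessor()
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)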
| 55
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 55
| 1
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Apply the Burrows-Wheeler transform to `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or convertible to int.")

    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result["bwt_string"]}\''''
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
f'''we get original string \'{original_string}\''''
)
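    # Round-trip check (illustrative): reversing the transform must recover
    # the original input string.
    assert original_string == s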
| 211
|
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : List[Any] )-> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'{self.value}': (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root
def __str__( self : Tuple )-> str:
return str(self.root )
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Node , _SCREAMING_SNAKE_CASE : Node | None )-> None:
if new_children is not None: # reset its kids
lowerCAmelCase__ : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_SCREAMING_SNAKE_CASE ): # If it is the right children
lowerCAmelCase__ : List[str] = new_children
else:
lowerCAmelCase__ : Any = new_children
else:
lowerCAmelCase__ : Optional[Any] = new_children
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : Node )-> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase__( self : int )-> bool:
return self.root is None
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] )-> None:
lowerCAmelCase__ : Any = Node(_SCREAMING_SNAKE_CASE ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase__ : Optional[Any] = new_node # set its root
else: # Tree is not empty
lowerCAmelCase__ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase__ : Dict = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase__ : List[Any] = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase__ : Tuple = new_node
break
else:
lowerCAmelCase__ : Union[str, Any] = parent_node.right
lowerCAmelCase__ : Dict = parent_node
def UpperCAmelCase__( self : str , *_SCREAMING_SNAKE_CASE : List[Any] )-> None:
for value in values:
self.__insert(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] )-> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
lowerCAmelCase__ : Union[str, Any] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase__ : List[str] = node.left if value < node.value else node.right
return node
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Node | None = None )-> Node | None:
if node is None:
if self.root is None:
return None
lowerCAmelCase__ : List[Any] = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase__ : str = node.right
return node
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Node | None = None )-> Node | None:
if node is None:
lowerCAmelCase__ : Any = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase__ : Dict = self.root
while node.left is not None:
lowerCAmelCase__ : Tuple = node.left
return node
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : int )-> None:
lowerCAmelCase__ : Optional[Any] = self.search(_SCREAMING_SNAKE_CASE ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif node.left is None: # Has only right children
self.__reassign_nodes(_SCREAMING_SNAKE_CASE , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_SCREAMING_SNAKE_CASE , node.left )
else:
lowerCAmelCase__ : Tuple = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase__ : Optional[Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Node | None )-> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple=None )-> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : Node | None )-> None:
if node:
self.inorder(_SCREAMING_SNAKE_CASE , node.left )
arr.append(node.value )
self.inorder(_SCREAMING_SNAKE_CASE , node.right )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Node )-> int:
lowerCAmelCase__ : list[int] = []
self.inorder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # append all values to list using inorder traversal
return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    """Post-order traversal: left subtree, right subtree, then the node itself."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def lowerCamelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase__ : str = BinarySearchTree()
for i in testlist:
t.insert(_a )
# Prints all the elements of the list in order traversal
print(_a )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_a )
print(_a )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 211
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator('Something there')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))

        outputs = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        outputs = generator(
            ['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        with self.assertRaises(Exception):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt')
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there', do_sample=False)
        self.assertEqual(outputs, [{'generated_text': ''}])

        num_return_sequences = 3
        outputs = generator(
            'Something there',
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': ''},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator('This is a test', do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {'generated_token_ids': ANY(torch.Tensor)},
                {'generated_token_ids': ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '<pad>'
        outputs = generator(
            ['This is a test', 'This is a second test'],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {'generated_token_ids': ANY(torch.Tensor)},
                    {'generated_token_ids': ANY(torch.Tensor)},
                ],
                [
                    {'generated_token_ids': ANY(torch.Tensor)},
                    {'generated_token_ids': ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='tf')
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there', do_sample=False)
        self.assertEqual(outputs, [{'generated_text': ''}])
| 322
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main() -> None:
    """Close or mark stale any open issue with no recent activity."""
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 322
| 1
|
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )

    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
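    # Worked examples (illustrative): a valid and an invalid number.
    for number in ["+94767283261", "0957651234"]:
        print(number, "->", is_sri_lankan_phone_number(number))  # True, False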
| 108
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
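
    # Worked example (illustrative): a 31.8 mH inductor at 50 Hz gives
    # X_L = 2 * pi * f * L ≈ 9.99 ohms.
    print(ind_reactance(0.0318, 50, 0))  # {'reactance': 9.990...}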
| 108
| 1
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 39
|
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Return the last `digits` digits of the power tower base ** base ** ... of the given height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
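# With the defaults this is Project Euler problem 188: the last eight digits of
# the hyperexponentiation (tetration) 1777 tetrated 1855 times.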
if __name__ == "__main__":
print(f'''{solution() = }''')
| 153
| 0
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth, ) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
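# Usage sketch: `python fractals.py 4` draws a depth-4 Sierpinski triangle;
# depth 0 draws only the outer triangle.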
| 353
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 173
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = 'time_series_transformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling="mean", num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", d_model=64, dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, **kwargs, ):
"""simple docstring"""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 197
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
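# _LazyModule (wired up at the bottom of this file) defers the torch-dependent
# imports below until an attribute is first accessed, keeping a bare
# `import transformers` cheap.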
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_luke'] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 160
| 0
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
| 355
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps", )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 43
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    r"""Constructs a ConvNeXT image processor."""

    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=None, crop_pct=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, crop_pct, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, crop_pct=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
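# A minimal usage sketch (names assumed from the class above):
#     processor = ConvNextImageProcessor()
#     batch = processor(images=pil_image, return_tensors='pt')  # -> {'pixel_values': tensor}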
| 211
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bitstring from big endian to little endian word order."""
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as the hex of its little-endian 32-bit form."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a bitstring whose length is a multiple of 512."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
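# This is the standard MD5 padding: append a single 1 bit, zero-fill until the
# length is congruent to 448 mod 512, then append the original length as a
# 64-bit little-endian quantity (RFC 1321, sections 3.1-3.2).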
def get_block_words(bit_string: bytes):
    """Split the padded bitstring into blocks of sixteen 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift` places."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char hex MD5 digest of `message`."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
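# Well-known test vector: md5_me(b"") should equal b"d41d8cd98f00b204e9800998ecf8427e".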
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211
| 1
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = 'owlvit_text_model'

    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = 'owlvit_vision_model'

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = 'owlvit'
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs, ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 28
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main', model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 28
| 1
|
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """Return the env-configured default log level, falling back to WARNING."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }")
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the root logger first."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Emit self.warning() unless TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit self.warning() only once per unique set of arguments."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 108
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding..")
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler
def lowercase__ ( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowerCAmelCase : Dict = model(**snake_case__ , use_cache=snake_case__ )[0]
lowerCAmelCase : List[Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
lowerCAmelCase , lowerCAmelCase : str = model(**snake_case__ , labels=snake_case__ , use_cache=snake_case__ )[:2]
else:
# compute label smoothed loss
lowerCAmelCase : int = model(**snake_case__ , use_cache=snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.nn.functional.log_softmax(snake_case__ , dim=-1 )
lowerCAmelCase , lowerCAmelCase : str = self.loss_fn(snake_case__ , snake_case__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = inputs.pop("labels" )
lowerCAmelCase , lowerCAmelCase : str = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
return loss
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ):
"""simple docstring"""
lowerCAmelCase : List[str] = self._prepare_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowerCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **snake_case__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["max_length"] )
labels = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
loss , logits = self._compute_loss(model , inputs , labels )
loss = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
logits = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _pad_tensors_to_max_len( self , tensor , max_length ):
"""simple docstring"""
pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f""" padded to `max_length`={max_length}""" )
padded_tensor = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
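# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original trainer): the right-padding
# trick _pad_tensors_to_max_len implements, shown standalone on a toy tensor.
# The pad id and shapes below are invented for the demo.
import torch

def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    # Allocate a (batch, max_length) tensor filled with the pad id, then copy
    # the original values into the leading positions of each row.
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded

# pad_to_max_len(torch.tensor([[1, 2, 3], [4, 5, 6]]), 5, 0)
# -> tensor([[1, 2, 3, 0, 0], [4, 5, 6, 0, 0]])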
| 108
| 1
|
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy( preds, labels ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def _compute( self , predictions , references ) -> dict:
return {"accuracy": simple_accuracy(predictions , references )}
| 361
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
def create_rename_keys( config, base_model=False ):
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict, config, base_model=False ):
for i in range(config.num_hidden_layers ):
if base_model:
prefix = ''
else:
prefix = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
: config.hidden_size, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
ignore_keys = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(k, None )
def rename_key( dct, old, new ):
val = dct.pop(old )
dct[new] = val
def prepare_img():
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url, stream=True ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name, pytorch_dump_folder_path ):
config = ViTConfig()
base_model = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
base_model = True
config.patch_size = int(vit_name[-12:-10] )
config.image_size = int(vit_name[-9:-6] )
else:
config.num_labels = 1000
repo_id = 'huggingface/label-files'
filename = 'imagenet-1k-id2label.json'
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.patch_size = int(vit_name[-6:-4] )
config.image_size = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
config.hidden_size = 192
config.intermediate_size = 768
config.num_hidden_layers = 12
config.num_attention_heads = 3
elif vit_name[9:].startswith('small' ):
config.hidden_size = 384
config.intermediate_size = 1536
config.num_hidden_layers = 12
config.num_attention_heads = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
config.hidden_size = 768
config.intermediate_size = 2304
config.num_hidden_layers = 8
config.num_attention_heads = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
elif vit_name[4:].startswith('huge' ):
config.hidden_size = 1280
config.intermediate_size = 5120
config.num_hidden_layers = 32
config.num_attention_heads = 16
# load original model from timm
timm_model = timm.create_model(vit_name, pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = timm_model.state_dict()
if base_model:
remove_classification_head_(state_dict )
rename_keys = create_rename_keys(config, base_model )
for src, dest in rename_keys:
rename_key(state_dict, src, dest )
read_in_q_k_v(state_dict, config, base_model )
# load HuggingFace model
if vit_name[-5:] == "in21k":
model = ViTModel(config ).eval()
else:
model = ViTForImageClassification(config ).eval()
model.load_state_dict(state_dict )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
image_processor = DeiTImageProcessor(size=config.image_size )
else:
image_processor = ViTImageProcessor(size=config.image_size )
encoding = image_processor(images=prepare_img(), return_tensors='pt' )
pixel_values = encoding['pixel_values']
outputs = model(pixel_values )
if base_model:
timm_pooled_output = timm_model.forward_features(pixel_values )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3 )
else:
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits, outputs.logits, atol=1E-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
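# Illustrative sketch (separate from the conversion script): how a fused timm
# qkv projection splits into query/key/value row-blocks, on toy sizes.
import torch

hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)
# The fused matrix stacks the q, k and v projections in that order, so plain
# row slicing — exactly what read_in_q_k_v does — recovers each one.
q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : 2 * hidden_size, :]
v = qkv_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)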
| 207
| 0
|
"""simple docstring"""
def solution(n = 600851475143 ) -> int:
"""Return the largest prime factor of n (Project Euler problem 3)."""
try:
n = int(n )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
i = 2
ans = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
ans = i
while n % i == 0:
n = n // i
i += 1
return int(ans )
if __name__ == "__main__":
print(F"{solution() = }")
| 136
|
"""simple docstring"""
from maths.prime_factors import prime_factors
def mobius( number ):
if not isinstance(number , int ):
msg = f'''Input value of [number={number}] must be an integer'''
raise TypeError(msg )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
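# Illustrative, self-contained sketch of what mobius() consumes (the original
# imports prime_factors from maths.prime_factors; reimplemented here):
def _prime_factors(n: int) -> list:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

# _prime_factors(12) == [2, 2, 3]: odd length, so mobius(12) returns -1.
# _prime_factors(6) == [2, 3]: even length, so mobius(6) returns 1.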
| 173
| 0
|
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_snake_case : Any = get_logger(__name__)
class ExtractManager:
def __init__( self , cache_dir : Optional[str] = None ) -> None:
"""simple docstring"""
self.extract_dir = (
os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
self.extractor = Extractor
def _get_output_path( self , path : str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
abs_path = os.path.abspath(path )
return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )
def _do_extract( self , output_path : str , force_extract : bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
)
def extract( self , input_path : str , force_extract : bool = False ) -> str:
"""simple docstring"""
extractor_format = self.extractor.infer_extractor_format(input_path )
if not extractor_format:
return input_path
output_path = self._get_output_path(input_path )
if self._do_extract(output_path , force_extract ):
self.extractor.extract(input_path , output_path , extractor_format )
return output_path
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def is_extractable( cls , path : Union[Path, str] , **kwargs ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
...
class MagicNumberBaseExtractor(BaseExtractor , ABC):
magic_numbers : List[bytes] = []
@staticmethod
def read_magic_number( path : Union[Path, str] , magic_number_length : int ) -> bytes:
"""simple docstring"""
with open(path , '''rb''' ) as f:
return f.read(magic_number_length )
@classmethod
def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
magic_number = cls.read_magic_number(path , magic_number_length )
except OSError:
return False
return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor):
@classmethod
def is_extractable( cls , path : Union[Path, str] , **kwargs ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(path )
@staticmethod
def safemembers( members , output_path ):
"""Yield only archive members whose paths stay inside output_path."""
def resolved( path : str ) -> str:
return os.path.realpath(os.path.abspath(path ) )
def badpath( path : str , base : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(base , path ) ).startswith(base )
def badlink( info , base : str ) -> bool:
# Links are interpreted relative to the directory containing the link
tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=tip )
base = resolved(output_path )
for finfo in members:
if badpath(finfo.name , base ):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(finfo , base ):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(finfo , base ):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(output_path , exist_ok=True )
tar_file = tarfile.open(input_path )
tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b'\x1F\x8B']
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(input_path , '''rb''' ) as gzip_file:
with open(output_path , '''wb''' ) as extracted_file:
shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(path , magic_number=magic_number ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(path , '''rb''' ) as fp:
endrec = _EndRecData(fp )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
data = fp.read(sizeCentralDir ) # CD is where we expect it to be
if len(data ) == sizeCentralDir:
centdir = struct.unpack(structCentralDir , data ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(output_path , exist_ok=True )
with zipfile.ZipFile(input_path , '''r''' ) as zip_file:
zip_file.extractall(output_path )
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
magic_numbers = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(input_path ) as compressed_file:
with open(output_path , '''wb''' ) as extracted_file:
shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor(MagicNumberBaseExtractor):
magic_numbers = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(output_path , exist_ok=True )
rf = rarfile.RarFile(input_path )
rf.extractall(output_path )
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
magic_numbers = [b'\x28\xb5\x2F\xFD']
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
dctx = zstd.ZstdDecompressor()
with open(input_path , '''rb''' ) as ifh, open(output_path , '''wb''' ) as ofh:
dctx.copy_stream(ifh , ofh )
class Bzip2Extractor(MagicNumberBaseExtractor):
magic_numbers = [b'\x42\x5A\x68']
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
with bz2.open(input_path , '''rb''' ) as compressed_file:
with open(output_path , '''wb''' ) as extracted_file:
shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import py7zr
os.makedirs(output_path , exist_ok=True )
with py7zr.SevenZipFile(input_path , '''r''' ) as archive:
archive.extractall(output_path )
class Lz4Extractor(MagicNumberBaseExtractor):
magic_numbers = [b'\x04\x22\x4D\x18']
@staticmethod
def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lz4.frame
with lz4.frame.open(input_path , '''rb''' ) as compressed_file:
with open(output_path , '''wb''' ) as extracted_file:
shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
extractors : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": Bzip2Extractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": Lz4Extractor, # <Added version="2.4.0"/>
}
@classmethod
def _get_magic_number_max_length( cls ) -> int:
"""simple docstring"""
return max(
len(extractor_magic_number )
for extractor in cls.extractors.values()
if issubclass(extractor , MagicNumberBaseExtractor )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _read_magic_number( path : Union[Path, str] , magic_number_length : int ) -> bytes:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
except OSError:
return b""
@classmethod
def is_extractable( cls , path : Union[Path, str] , return_extractor : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=FutureWarning , )
extractor_format = cls.infer_extractor_format(path )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def infer_extractor_format( cls , path : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
magic_number_max_length = cls._get_magic_number_max_length()
magic_number = cls._read_magic_number(path , magic_number_max_length )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(path , magic_number=magic_number ):
return extractor_format
@classmethod
def extract( cls , input_path : Union[Path, str] , output_path : Union[Path, str] , extractor_format : Optional[str] = None , extractor : Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
# Prevent parallel extractions
lock_path = str(Path(output_path ).with_suffix('''.lock''' ) )
with FileLock(lock_path ):
shutil.rmtree(output_path , ignore_errors=True )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(extractor_format , str ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=FutureWarning , )
extractor = extractor if extractor != '''deprecated''' else extractor_format
else:
extractor = cls.extractors[extractor_format]
return extractor.extract(input_path , output_path )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=FutureWarning , )
for extractor in cls.extractors.values():
if extractor.is_extractable(input_path ):
return extractor.extract(input_path , output_path )
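# Illustrative sketch (not part of the module): the magic-number sniffing idea
# the extractor classes rely on — classify a stream purely from its leading
# bytes. The prefixes below mirror the magic_numbers tables above.
MAGIC_PREFIXES = {"gzip": [b"\x1f\x8b"], "zip": [b"PK\x03\x04"], "bz2": [b"\x42\x5a\x68"]}

def sniff(first_bytes: bytes):
    for fmt, prefixes in MAGIC_PREFIXES.items():
        if any(first_bytes.startswith(p) for p in prefixes):
            return fmt
    return None

# sniff(b"\x1f\x8b\x08\x00...") -> 'gzip'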
| 179
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A(metaclass=DummyObject ):
_backends = ['torch', 'scipy']
def __init__( self , *args , **kwargs ) -> None:
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def from_config( cls , *args , **kwargs ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def from_pretrained( cls , *args , **kwargs ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
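# Illustrative sketch (simplified from the DummyObject pattern above): a
# metaclass that turns instantiation of a placeholder class into a clear
# "missing backend" error. Names here are invented for the demo.
class _RequiresBackends(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the torch and scipy backends.")

class FakeTorchScipyObject(metaclass=_RequiresBackends):
    pass

# FakeTorchScipyObject() -> ImportError: FakeTorchScipyObject requires ...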
| 179
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowercase( unittest.TestCase ):
'''simple docstring'''
def test_download_only_pytorch( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
pipeline, params = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""", safety_checker=None, cache_dir=tmpdirname )
all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname )[0], """snapshots""" ) )]
files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class lowercase( unittest.TestCase ):
'''simple docstring'''
def test_dummy_all_tpus( self ):
'''simple docstring'''
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""", safety_checker=None )
prompt = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 4
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed, num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 4.1_514_745 ) < 1E-3
assert np.abs(np.abs(images, dtype=np.float32 ).sum() - 49_947.875 ) < 5E-1
images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(images_pil ) == num_samples
def test_stable_diffusion_v1_4( self ):
'''simple docstring'''
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""flax""", safety_checker=None )
prompt = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed, num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.05_652_401) ) < 1E-3
assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2_383_808.2) ) < 5E-1
def test_stable_diffusion_v1_4_bfloat_16( self ):
'''simple docstring'''
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""bf16""", dtype=jnp.bfloat16, safety_checker=None )
prompt = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed, num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2_373_516.75) ) < 5E-1
def test_stable_diffusion_v1_4_bfloat_16_with_safety( self ):
'''simple docstring'''
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""bf16""", dtype=jnp.bfloat16 )
prompt = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed, num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2_373_516.75) ) < 5E-1
def test_stable_diffusion_v1_4_bfloat_16_ddim( self ):
'''simple docstring'''
scheduler = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", set_alpha_to_one=False, steps_offset=1, )
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""bf16""", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, )
scheduler_state = scheduler.create_state()
params["scheduler"] = scheduler_state
prompt = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed, num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.045_043_945) ) < 1E-3
assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2_347_693.5) ) < 5E-1
def test_jax_memory_efficient_attention( self ):
'''simple docstring'''
prompt = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prng_seed = jax.random.split(jax.random.PRNGKey(0 ), num_samples )
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""bf16""", dtype=jnp.bfloat16, safety_checker=None, )
params = replicate(params )
prompt_ids = pipeline.prepare_inputs(prompt )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids, params, prng_seed, jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""bf16""", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )
params = replicate(params )
prompt_ids = pipeline.prepare_inputs(prompt )
prompt_ids = shard(prompt_ids )
images_eff = pipeline(prompt_ids, params, prng_seed, jit=True ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
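# Illustrative sketch (separate from the tests): the replicate/shard/pmap
# data-parallel pattern the tests above rely on, reduced to a toy function.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n_dev = jax.device_count()
params = {"w": jnp.ones((2,))}  # one copy of the weights...
params = replicate(params)      # ...replicated with a leading device axis
batch = jnp.ones((n_dev * 4, 2))
batch = shard(batch)            # reshaped to (n_dev, 4, 2)

@jax.pmap
def apply(params, x):
    return x @ params["w"]

out = apply(params, batch)      # shape (n_dev, 4), one shard per device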
| 64
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parrameters
OUTPUT_SIZE = (720, 1280) # Height, Width
SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def main():
'''simple docstring'''
img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
for index in range(NUMBER_IMAGES ):
idxs = random.sample(range(len(annos ) ) , 4 )
new_image, new_annos, path = update_image_and_anno(
img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
letter_code = random_chars(32 )
file_name = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
file_root = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cv2.imwrite(f"""{file_root}.jpg""" , new_image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
annos_list = []
for anno in new_annos:
width = anno[3] - anno[1]
height = anno[4] - anno[2]
x_center = anno[1] + width / 2
y_center = anno[2] + height / 2
obj = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(obj )
with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset( label_dir , img_dir ):
'''simple docstring'''
img_paths = []
labels = []
for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(label_file ) as in_file:
obj_lists = in_file.readlines()
img_path = os.path.join(img_dir , f"""{label_name}.jpg""" )
boxes = []
for obj_list in obj_lists:
obj = obj_list.rstrip('''\n''' ).split(''' ''' )
xmin = float(obj[1] ) - float(obj[3] ) / 2
ymin = float(obj[2] ) - float(obj[4] ) / 2
xmax = float(obj[1] ) + float(obj[3] ) / 2
ymax = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(img_path )
labels.append(boxes )
return img_paths, labels
def update_image_and_anno( all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
'''simple docstring'''
output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
divid_point_x = int(scale_x * output_size[1] )
divid_point_y = int(scale_y * output_size[0] )
new_anno = []
path_list = []
for i, index in enumerate(idxs ):
path = all_img_list[index]
path_list.append(path )
img_annos = all_annos[index]
img = cv2.imread(path )
if i == 0: # top-left
img = cv2.resize(img , (divid_point_x, divid_point_y) )
output_img[:divid_point_y, :divid_point_x, :] = img
for bbox in img_annos:
xmin = bbox[1] * scale_x
ymin = bbox[2] * scale_y
xmax = bbox[3] * scale_x
ymax = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
for bbox in img_annos:
xmin = scale_x + bbox[1] * (1 - scale_x)
ymin = bbox[2] * scale_y
xmax = scale_x + bbox[3] * (1 - scale_x)
ymax = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
for bbox in img_annos:
xmin = bbox[1] * scale_x
ymin = scale_y + bbox[2] * (1 - scale_y)
xmax = bbox[3] * scale_x
ymax = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
img = cv2.resize(
img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
for bbox in img_annos:
xmin = scale_x + bbox[1] * (1 - scale_x)
ymin = scale_y + bbox[2] * (1 - scale_y)
xmax = scale_x + bbox[3] * (1 - scale_x)
ymax = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
new_anno = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def random_chars( number_char ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
letter_code = ascii_lowercase + digits
return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
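# Illustrative check (not in the script): the top-left quadrant bbox remap in
# update_image_and_anno. With normalized [0, 1] boxes, pasting an image scaled
# by (scale_x, scale_y) into the top-left corner simply scales coordinates.
scale_x, scale_y = 0.5, 0.5
bbox = [0, 0.2, 0.4, 0.6, 0.8]  # class, xmin, ymin, xmax, ymax
remapped = [bbox[0], bbox[1] * scale_x, bbox[2] * scale_y, bbox[3] * scale_x, bbox[4] * scale_y]
print(remapped)  # [0, 0.1, 0.2, 0.3, 0.4]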
| 43
| 0
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A__ ( unittest.TestCase ):
def tearDown( self ) -> None:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_inpaint_pipeline( self ):
'''simple docstring'''
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
mask_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
model_id = """xvjiarui/stable-diffusion-2-inpainting"""
pipeline , params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
init_image = num_samples * [init_image]
mask_image = num_samples * [mask_image]
prompt_ids , processed_masked_images , processed_masks = pipeline.prepare_inputs(prompt , init_image , mask_image )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed , jax.device_count() )
prompt_ids = shard(prompt_ids )
processed_masked_images = shard(processed_masked_images )
processed_masks = shard(processed_masks )
output = pipeline(
prompt_ids , processed_masks , processed_masked_images , params , prng_seed , num_inference_steps , jit=True )
output_images = output.images.reshape(num_samples , 512 , 512 , 3 )
image_slice = output_images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
expected_slice = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
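# Illustrative sketch: the PRNG handling used above — one seed split into
# per-device keys so each shard draws distinct but reproducible noise.
import jax

key = jax.random.PRNGKey(0)
device_keys = jax.random.split(key, jax.device_count())
print(device_keys.shape)  # (device_count, 2)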
| 101
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int, ) -> list[float]:
rows1, cols1 = coefficient_matrix.shape
rows2, cols2 = constant_matrix.shape
if rows1 != cols1:
msg = F'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
raise ValueError(msg )
if cols2 != 1:
msg = F'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
raise ValueError(msg )
if rows1 != rows2:
msg = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F'''received {rows1}x{cols1} and {rows2}x{cols2}'''
)
raise ValueError(msg )
if len(init_val ) != rows1:
msg = (
"""Number of initial values must be equal to number of rows in coefficient """
F'''matrix but received {len(init_val )} and {rows1}'''
)
raise ValueError(msg )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
table = np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
rows, cols = table.shape
strictly_diagonally_dominant(table )
# Iterates the whole matrix for given number of times
for _ in range(iterations ):
new_val = []
for row in range(rows ):
temp = 0
for col in range(cols ):
if col == row:
denom = table[row][col]
elif col == cols - 1:
val = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
temp = (temp + val) / denom
new_val.append(temp )
init_val = new_val
return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table: NDArray[float64] ) -> bool:
rows, cols = table.shape
is_diagonally_dominant = True
for i in range(0, rows ):
total = 0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
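# Illustrative worked example (not in the module): two Jacobi sweeps on the
# strictly diagonally dominant system 4x + y = 2, x + 3y = -1, from (0, 0);
# each sweep solves every row for its diagonal unknown using the old values.
x, y = 0.0, 0.0
for _ in range(2):
    x, y = (2 - y) / 4, (-1 - x) / 3
print(x, y)  # 0.5833..., -0.5 — converging toward x = 7/11, y = -6/11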
| 101
| 1
|
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
UpperCamelCase = Vector()
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCamelCase__ ) , '(0,0,0,0,0,1)' )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCamelCase__ ) , 4 )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2] )
UpperCamelCase = Vector([1, 2, 3, 4, 5] )
UpperCamelCase = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
UpperCamelCase = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
UpperCamelCase = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
UpperCamelCase = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
UpperCamelCase = Vector([2, -1, 4] ) # for test of dot product
UpperCamelCase = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def A ( self : Dict ):
"""simple docstring"""
self.assertEqual(str(zero_vector(1_0 ) ).count('0' ) , 1_0 )
def A ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
UpperCamelCase = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCamelCase__ , UpperCamelCase__ ) ) , '(3,4,7)' )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = Vector([1, 0, 0, 0, 0, 0] )
UpperCamelCase = x.copy()
self.assertEqual(str(UpperCamelCase__ ) , str(UpperCamelCase__ ) )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCamelCase__ ) , '(0,1,0)' )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(UpperCamelCase__ ) )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCamelCase = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCamelCase__ , UpperCamelCase__ ) )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCamelCase = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCamelCase__ , UpperCamelCase__ ) )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
UpperCamelCase = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(UpperCamelCase__ ) )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCamelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCamelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def A ( self : List[str] ):
"""simple docstring"""
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
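# Illustrative check (not in the test file): the 3x3 determinant the tests
# assert equals -5, expanded along the first row by cofactors.
a = [[1, 2, 3], [2, 4, 5], [6, 7, 8]]
det = (
    a[0][0] * (a[1][1] * a[2][2] - a[1][2] * a[2][1])
    - a[0][1] * (a[1][0] * a[2][2] - a[1][2] * a[2][0])
    + a[0][2] * (a[1][0] * a[2][1] - a[1][1] * a[2][0])
)
print(det)  # 1*(-3) - 2*(-14) + 3*(-10) = -5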
| 28
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowerCamelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(_a )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self , *args , **kwargs ):
"""simple docstring"""
super().__init__(*args , **kwargs )
requires_backends(self , 'decord' )
self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
"""simple docstring"""
preprocess_params = {}
if frame_sampling_rate is not None:
preprocess_params['frame_sampling_rate'] = frame_sampling_rate
if num_frames is not None:
preprocess_params['num_frames'] = num_frames
postprocess_params = {}
if top_k is not None:
postprocess_params['top_k'] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , videos : Union[str, List[str]] , **kwargs ):
"""simple docstring"""
return super().__call__(videos , **kwargs )
def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
"""simple docstring"""
if num_frames is None:
num_frames = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
video = BytesIO(requests.get(video ).content )
videoreader = VideoReader(video )
videoreader.seek(0 )
start_idx = 0
end_idx = num_frames * frame_sampling_rate - 1
indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
video = videoreader.get_batch(indices ).asnumpy()
video = list(video )
model_inputs = self.image_processor(video , return_tensors=self.framework )
return model_inputs
def _forward( self , model_inputs ):
"""simple docstring"""
model_outputs = self.model(**model_inputs )
return model_outputs
def postprocess( self , model_outputs , top_k=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.softmax(-1 )[0]
scores , ids = probs.topk(top_k )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 28
| 1
|
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian( img: np.ndarray , variance: float ) -> np.ndarray:
"""simple docstring"""
sigma = math.sqrt(variance )
cons = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice( img: np.ndarray , x: int , y: int , kernel_size: int ) -> np.ndarray:
"""simple docstring"""
half = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size: int , spatial_variance: float ) -> np.ndarray:
"""simple docstring"""
arr = np.zeros((kernel_size, kernel_size) )
for i in range(0 , kernel_size ):
for j in range(0 , kernel_size ):
arr[i, j] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(arr , spatial_variance )
def bilateral_filter( img: np.ndarray , spatial_variance: float , intensity_variance: float , kernel_size: int , ) -> np.ndarray:
"""simple docstring"""
imga = np.zeros(img.shape )
gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
size_x, size_y = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
img_s = get_slice(img , i , j , kernel_size )
img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
img_ig = vec_gaussian(img_i , intensity_variance )
weights = np.multiply(gauss_ker , img_ig )
vals = np.multiply(img_s , weights )
val = np.sum(vals ) / np.sum(weights )
imga[i, j] = val
return imga
def parse_args( args: list ) -> tuple:
"""simple docstring"""
filename = args[1] if args[1:] else '../image_data/lena.jpg'
spatial_variance = float(args[2] ) if args[2:] else 1.0
intensity_variance = float(args[3] ) if args[3:] else 1.0
if args[4:]:
kernel_size = int(args[4] )
kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
else:
kernel_size = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
img = cv2.imread(filename, 0)
cv2.imshow('''input image''', img)
out = img / 255
out = out.astype('''float32''')
out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
out = out * 255
out = np.uint8(out)
cv2.imshow('''output image''', out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 356
|
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 275
| 0
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
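# Each Node carries g_cost (steps from the start), h_cost (heuristic distance
# to the goal) and f_cost = g_cost + h_cost, which orders the open list.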
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        '''simple docstring'''
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        '''simple docstring'''
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
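# BidirectionalAStar runs two AStar searches, one from start to goal and one
# from goal to start, retargeting each frontier at the other's newest node and
# stopping when both expand the same position.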
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F"AStar execution time = {end_time:f} seconds")
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(F"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 6
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
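# PyTorch linear layers store weights as (out_features, in_features), while
# TF1 checkpoints expect (in, out) kernels, hence the transposes below.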
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    '''simple docstring'''
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F"""bert/{name}"""

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F"""Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}""")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('''-''', '''_''') + '''.ckpt'''))
def main(raw_args=None):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, required=True, help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''', type=str, default=None, required=False, help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''', type=str, required=True, help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''', type=str, required=True, help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir)
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 207
| 0
|
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
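# These tests shell out to the `accelerate` CLI and assert on the command it
# would run (the TPU tests use --debug so gcloud is only echoed, not executed).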
class AccelerateLauncherTester(unittest.TestCase):
    '''simple docstring'''

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        """simple docstring"""
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        """simple docstring"""
        for config in sorted(self.test_config_path.glob("""**/*.yaml""")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["""--config_file""", str(config), self.test_file_path], env=os.environ.copy())

    def test_accelerate_test(self):
        """simple docstring"""
        execute_subprocess_async(["""accelerate""", """test"""], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    '''simple docstring'''

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        """simple docstring"""
        output = run_command(
            self.cmd
            + ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all', output, )

    def test_base_backward_compatibility(self):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                """--config_file""",
                """tests/test_configs/0_12_0.yaml""",
                """--command""",
                self.command,
                """--tpu_zone""",
                self.tpu_zone,
                """--tpu_name""",
                self.tpu_name,
                """--debug""",
            ], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all', output, )

    def test_with_config_file(self):
        """simple docstring"""
        output = run_command(
            self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""], return_stdout=True)
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', output, )

    def test_with_config_file_and_command(self):
        """simple docstring"""
        output = run_command(
            self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all', output, )

    def test_with_config_file_and_multiple_command(self):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                """--config_file""",
                """tests/test_configs/latest.yaml""",
                """--command""",
                self.command,
                """--command""",
                """echo \"Hello World\"""",
                """--debug""",
            ], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all', output, )

    def test_with_config_file_and_command_file(self):
        """simple docstring"""
        output = run_command(
            self.cmd
            + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', output, )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                """--config_file""",
                """tests/test_configs/0_12_0.yaml""",
                """--command_file""",
                self.command_file,
                """--tpu_zone""",
                self.tpu_zone,
                """--tpu_name""",
                self.tpu_name,
                """--debug""",
            ], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', output, )

    def test_accelerate_install(self):
        """simple docstring"""
        output = run_command(
            self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all', output, )

    def test_accelerate_install_version(self):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                """--config_file""",
                """tests/test_configs/latest.yaml""",
                """--install_accelerate""",
                """--accelerate_version""",
                """12.0.0""",
                """--debug""",
            ], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all', output, )
| 168
|
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
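# fairseq ties the XGLM output projection to the input embeddings, so the
# converter rebuilds lm_head as a linear layer sharing the embedding weights.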
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    args = Namespace(**checkpoint["""cfg"""]["""model"""])
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    state_dict = {key.replace("""decoder""", """model"""): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 168
| 1
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
"""simple docstring"""
    def __init__( self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)
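    # The check below runs the decoder once with use_cache=True, then feeds a
    # single new token together with the cached past_key_values and verifies
    # the result matches a full forward pass over the concatenated sequence.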
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_input_ids , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    def test_save_load_fast_init_from_base(self):
        '''simple docstring'''
        pass

    def test_save_load_fast_init_to_base(self):
        '''simple docstring'''
        pass

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )

    def test_retain_grad_hidden_states_attentions(self):
        '''simple docstring'''
        return

    @unittest.skip('''The model doesn\'t support left padding''' )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        '''simple docstring'''
        pass
| 179
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """roberta"""
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
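# For ONNX export, the batch and sequence dimensions are declared dynamic so
# the exported graph accepts inputs of any batch size and length.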
class RobertaOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 179
| 1
|
'''simple docstring'''
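# Disjoint set (union-find) with union by rank and path compression; it also
# tracks the size of the largest set across merges.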
class DisjointSet:
    """simple docstring"""
    def __init__(self, set_counts: list) -> None:
        """simple docstring"""
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )

    def merge(self, src: int, dst: int) -> bool:
        """simple docstring"""
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True

    def get_parent(self, disj_set: int) -> int:
        """simple docstring"""
        if self.parents[disj_set] == disj_set:
            return disj_set
        # path compression
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
| 355
|
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
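# evaluate() interprets a restricted subset of Python: it mutates `state` in
# place and returns the value of the last statement, using only the tools
# passed in its second argument.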
def add_two(x):
    """simple docstring"""
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    """simple docstring"""
    def test_evaluate_assign(self):
        """simple docstring"""
        code = """x = 3"""
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3} )
        code = """x = y"""
        state = {"""y""": 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 5, """y""": 5} )

    def test_evaluate_call(self):
        """simple docstring"""
        code = """y = add_two(x)"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """y""": 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        """simple docstring"""
        code = """x = 3"""
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3} )

    def test_evaluate_dict(self):
        """simple docstring"""
        code = """test_dict = {'x': x, 'y': add_two(x)}"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        self.assertDictEqual(result , {"""x""": 3, """y""": 5} )
        self.assertDictEqual(state , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )

    def test_evaluate_expression(self):
        """simple docstring"""
        code = """x = 3\ny = 5"""
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """y""": 5} )

    def test_evaluate_f_string(self):
        """simple docstring"""
        code = """text = f'This is x: {x}.'"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {"""x""": 3, """text""": """This is x: 3."""} )

    def test_evaluate_if(self):
        """simple docstring"""
        code = """if x <= 3:\n    y = 2\nelse:\n    y = 5"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {"""x""": 3, """y""": 2} )
        state = {"""x""": 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 8, """y""": 5} )

    def test_evaluate_list(self):
        """simple docstring"""
        code = """test_list = [x, add_two(x)]"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {"""x""": 3, """test_list""": [3, 5]} )

    def test_evaluate_name(self):
        """simple docstring"""
        code = """y = x"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3, """y""": 3} )

    def test_evaluate_subscript(self):
        """simple docstring"""
        code = """test_list = [x, add_two(x)]\ntest_list[1]"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """test_list""": [3, 5]} )
        code = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )

    def test_evaluate_for(self):
        """simple docstring"""
        code = """x = 0\nfor i in range(3):\n    x = i"""
        state = {}
        result = evaluate(code , {"""range""": range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {"""x""": 2, """i""": 2} )
| 331
| 0
|
import heapq as hq
import math
from collections.abc import Iterator
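# Two variants of Prim's minimum spanning tree algorithm: prim() scans a list
# for the minimum-key vertex each round, prim_heap() keeps the frontier in a
# binary heap.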
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    '''simple docstring'''
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )


def prim(graph: list, root: Vertex) -> list:
    '''simple docstring'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    '''simple docstring'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)


def test_vector() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 101
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
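# Determinism matters here: these tests compare generated pixels against
# hard-coded slices, so seeds and deterministic kernels are fixed up front.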
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args , **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self , device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=init_image , )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=init_image , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''')
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt] , generator=generator , num_inference_steps=2 , output_type='''np''' , image=init_image , ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''')
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''')
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''')
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''')
        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 101
| 1
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
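# Flax counterparts of the 2D UNet down/up/mid blocks: each block stacks
# resnets (optionally interleaved with cross-attention transformers) and the
# down blocks hand skip activations to the matching up blocks via
# `output_states`.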
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        '''simple docstring'''
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,out_channels=self.out_channels,dropout_prob=self.dropout,dtype=self.dtype,)
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,n_heads=self.num_attention_heads,d_head=self.out_channels // self.num_attention_heads,depth=1,use_linear_projection=self.use_linear_projection,only_cross_attention=self.only_cross_attention,use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,)
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels,dtype=self.dtype )

    def __call__( self,hidden_states,temb,encoder_hidden_states,deterministic=True ):
        '''simple docstring'''
        output_states = ()
        for resnet, attn in zip(self.resnets,self.attentions ):
            hidden_states = resnet(hidden_states,temb,deterministic=deterministic )
            hidden_states = attn(hidden_states,encoder_hidden_states,deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        '''simple docstring'''
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,out_channels=self.out_channels,dropout_prob=self.dropout,dtype=self.dtype,)
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels,dtype=self.dtype )

    def __call__( self,hidden_states,temb,deterministic=True ):
        '''simple docstring'''
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states,temb,deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        '''simple docstring'''
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,out_channels=self.out_channels,dropout_prob=self.dropout,dtype=self.dtype,)
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,n_heads=self.num_attention_heads,d_head=self.out_channels // self.num_attention_heads,depth=1,use_linear_projection=self.use_linear_projection,only_cross_attention=self.only_cross_attention,use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,)
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels,dtype=self.dtype )

    def __call__( self,hidden_states,res_hidden_states_tuple,temb,encoder_hidden_states,deterministic=True ):
        '''simple docstring'''
        for resnet, attn in zip(self.resnets,self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states),axis=-1 )
            hidden_states = resnet(hidden_states,temb,deterministic=deterministic )
            hidden_states = attn(hidden_states,encoder_hidden_states,deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        '''simple docstring'''
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,out_channels=self.out_channels,dropout_prob=self.dropout,dtype=self.dtype,)
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels,dtype=self.dtype )

    def __call__( self,hidden_states,res_hidden_states_tuple,temb,deterministic=True ):
        '''simple docstring'''
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states),axis=-1 )
            hidden_states = resnet(hidden_states,temb,deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        '''simple docstring'''
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,out_channels=self.in_channels,dropout_prob=self.dropout,dtype=self.dtype,)
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,n_heads=self.num_attention_heads,d_head=self.in_channels // self.num_attention_heads,depth=1,use_linear_projection=self.use_linear_projection,use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,)
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,out_channels=self.in_channels,dropout_prob=self.dropout,dtype=self.dtype,)
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions

    def __call__( self,hidden_states,temb,encoder_hidden_states,deterministic=True ):
        '''simple docstring'''
        hidden_states = self.resnets[0](hidden_states,temb )
        for attn, resnet in zip(self.attentions,self.resnets[1:] ):
            hidden_states = attn(hidden_states,encoder_hidden_states,deterministic=deterministic )
            hidden_states = resnet(hidden_states,temb,deterministic=deterministic )
        return hidden_states
| 362
|
'''simple docstring'''
import random
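# Miller-Rabin primality test: write num - 1 as 2**t * s with s odd, then for
# a handful of random bases check that the base is not a witness to
# compositeness. Five rounds give a false-positive rate of at most 4**-5.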
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num )


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("""Prime number:""", num))
    print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 46
| 0
|
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
SCREAMING_SNAKE_CASE__ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
SCREAMING_SNAKE_CASE__ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> str:
lowerCAmelCase = get_test_to_tester_mapping(A_ )
lowerCAmelCase = get_test_to_tester_mapping(A_ )
lowerCAmelCase = {'''BertModelTest''': '''BertModelTester'''}
lowerCAmelCase = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(A_ ) , A_ )
self.assertEqual(get_test_info.to_json(A_ ) , A_ )
def _snake_case ( self ) -> str:
lowerCAmelCase = get_model_to_test_mapping(A_ )
lowerCAmelCase = get_model_to_test_mapping(A_ )
lowerCAmelCase = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
lowerCAmelCase = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(A_ ) , A_ )
self.assertEqual(get_test_info.to_json(A_ ) , A_ )
def _snake_case ( self ) -> Any:
lowerCAmelCase = get_model_to_tester_mapping(A_ )
lowerCAmelCase = get_model_to_tester_mapping(A_ )
lowerCAmelCase = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
lowerCAmelCase = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(A_ ) , A_ )
self.assertEqual(get_test_info.to_json(A_ ) , A_ )
| 46
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        '''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        image_processor_map = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
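    # The helpers below rebuild the tokenizer / image processor from the
    # temporary directory written in setUp.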
    def get_tokenizer(self , **kwargs):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor(self , **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__lowerCAmelCase : str = Image.fromarray(np.moveaxis(A_ , 0 , -1 ) )
return image_input
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Dict = self.get_tokenizer()
__lowerCAmelCase : List[Any] = self.get_image_processor()
__lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCAmelCase : int = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
__lowerCAmelCase : int = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Any = self.get_image_processor()
__lowerCAmelCase : Optional[Any] = self.get_tokenizer()
__lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
__lowerCAmelCase : Optional[Any] = image_processor(A_ , return_tensors='''np''' )
__lowerCAmelCase : Tuple = processor(images=A_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
__lowerCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Any = '''test'''
__lowerCAmelCase : Dict = processor(text=A_ )
__lowerCAmelCase : str = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.get_image_processor()
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : str = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : List[Any] = '''test'''
__lowerCAmelCase : int = self.prepare_image_inputs()
__lowerCAmelCase : int = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.get_image_processor()
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase : Optional[int] = processor.char_decode(A_ )
__lowerCAmelCase : Tuple = tokenizer.batch_decode(A_ )
__lowerCAmelCase : Any = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
__lowerCAmelCase : List[Any] = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.get_image_processor()
__lowerCAmelCase : List[str] = self.get_tokenizer()
__lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : List[Any] = torch.randn(1 , 27 , 38 )
__lowerCAmelCase : Optional[int] = torch.randn(1 , 27 , 5_0257 )
__lowerCAmelCase : Optional[Any] = torch.randn(1 , 27 , 3_0522 )
__lowerCAmelCase : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
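# --- Illustrative usage sketch (not part of the test file above) ---
# A minimal sketch of how MgpstrProcessor is typically driven end to end for
# scene-text recognition. The checkpoint name "alibaba-damo/mgp-str-base" and
# the local image path are assumptions for illustration only.
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

image = Image.open("word_image.png").convert("RGB")  # hypothetical input image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)
# batch_decode fuses the char/bpe/wp prediction heads and returns the best reading.
generated_text = processor.batch_decode(outputs.logits)["generated_text"]
print(generated_text)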
"""
Slowsort: a deliberately inefficient "multiply and surrender" sorting
algorithm. Sorts `sequence` in place over the inclusive index range
[start, end].
"""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # Move the largest element of the range to the end, then recurse on the rest.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
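# Quick usage check for slowsort (illustrative): the list is sorted in place,
# and a partial [start, end] range may also be given.
example = [8, 3, 5, 1, 9, 2]
slowsort(example)
assert example == [1, 2, 3, 5, 8, 9]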
"""LLaMA model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
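# Illustrative check of the validation above (a sketch, not part of the
# original file), using the LlamaConfig reconstructed in this module: a
# well-formed `rope_scaling` dict passes, a malformed one raises.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
try:
    LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # type field must be one of ['linear', 'dynamic']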
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
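# Standalone sketch of the shape contract these tests assert (illustrative,
# not part of the test file): a tiny randomly initialized TFDebertaV2Model
# maps (batch, seq) token ids to (batch, seq, hidden) states. Sizes mirror
# the tester defaults above.
import tensorflow as tf
from transformers import DebertaV2Config, TFDebertaV2Model

tiny_config = DebertaV2Config(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
tiny_model = TFDebertaV2Model(tiny_config)
hidden = tiny_model(tf.ones((1, 7), dtype=tf.int32)).last_hidden_state
assert hidden.shape == (1, 7, 32)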
"""
A* pathfinding on a small grid world represented as a 2-D numpy array.
"""
import numpy as np


class Cell:
    """A cell in the world, identified by its (x, y) position."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        # Return the in-bounds neighbours of `cell` (8-connected).
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
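# A note on the heuristic above (an observation, not from the original file):
# `n.h` uses the *squared* Euclidean distance, which can overestimate the true
# remaining cost, so the search behaves greedily rather than as admissible A*.
# For this 8-connected grid with unit step cost, the Chebyshev distance is an
# admissible drop-in alternative:
def chebyshev(cell, goal):
    x1, y1 = cell.position
    x2, y2 = goal.position
    return max(abs(x2 - x1), abs(y2 - y1))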
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
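# Usage sketch (assumes the names reconstructed above): instantiate the config
# and read the ONNX export contract it declares.
config = PoolFormerConfig()
onnx_config = PoolFormerOnnxConfig(config)
print(list(onnx_config.inputs))         # ['pixel_values']
print(onnx_config.atol_for_validation)  # 0.002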
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
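# Hedged standalone sketch of the resize rule the processor implements: the
# shortest edge is first scaled to size / crop_pct, then the image is
# center-cropped to crop_size. The input image here is synthetic.
import numpy as np
from PIL import Image
from transformers import PoolFormerImageProcessor

ip = PoolFormerImageProcessor(
    size={"shortest_edge": 30}, crop_pct=0.9, crop_size={"height": 30, "width": 30}
)
img = Image.fromarray(np.zeros((60, 80, 3), dtype=np.uint8))
pixel_values = ip(images=img, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 30, 30)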
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Return the two roots of a*x**2 + b*x + c = 0; purely real roots are
    returned as plain floats.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
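# Worked checks (illustrative): a positive perfect-square discriminant yields
# real roots, a negative discriminant yields complex ones.
assert quadratic_roots(a=1, b=-3, c=2) == (2, 1)
assert quadratic_roots(a=1, b=0, c=1) == (1j, -1j)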
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
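# Usage sketch for the island counter above (components of 1s are
# 8-connected); the grid and the class name Matrix follow the reconstruction
# above, and the example grid is illustrative.
example_graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
print(Matrix(5, 5, example_graph).count_islands())  # -> 5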
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class __a ( __UpperCamelCase ):
__snake_case : str = """roberta-prelayernorm"""
def __init__( self : Any , UpperCAmelCase : Any=5_02_65 , UpperCAmelCase : int=7_68 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Optional[int]=12 , UpperCAmelCase : Optional[Any]=30_72 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Optional[int]=5_12 , UpperCAmelCase : str=2 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : Optional[int]=1e-1_2 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : Optional[int]=0 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : List[Any]="absolute" , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=None , **UpperCAmelCase : str , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Optional[Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Optional[int] = hidden_act
lowerCAmelCase_ : Tuple = intermediate_size
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_probs_dropout_prob
lowerCAmelCase_ : Dict = max_position_embeddings
lowerCAmelCase_ : List[str] = type_vocab_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = position_embedding_type
lowerCAmelCase_ : Optional[int] = use_cache
lowerCAmelCase_ : Any = classifier_dropout
class __a ( __UpperCamelCase ):
@property
def A ( self : Optional[int] ):
if self.task == "multiple-choice":
lowerCAmelCase_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
"""
Project Euler spiral-diagonals problem: sum the numbers on both diagonals of
an n x n spiral formed by starting with 1 and moving clockwise. Each ring i
contributes four corners summing to 4 * (2i + 1)**2 - 6 * (2i).
"""
from math import ceil


def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
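# Sanity check: the 5x5 example spiral from the problem statement has diagonal
# sum 1 + (3 + 5 + 7 + 9) + (13 + 17 + 21 + 25) = 101.
assert solution(5) == 101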