import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    # replace the last `occurrence` occurrences of `old` in `s` with `new`
    split = s.rsplit(old, occurrence)
    return new.join(split)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
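
# A minimal invocation sketch (the script file name and local checkpoint path
# are assumptions for illustration; --checkpoint_path also accepts a URL, which
# the code above fetches via torch.hub):
#
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path ./encoder.pkl \
#         --pytorch_dump_folder_path ./flava_codebook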
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
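
# A small sketch of how the fallback defaults above resolve (illustrative
# values, not tied to any released checkpoint):
#
#     config = RwkvConfig(hidden_size=512, num_hidden_layers=12)
#     config.attention_hidden_size    # 512, falls back to hidden_size
#     config.intermediate_size        # 2048, falls back to 4 * hidden_size
#     config.max_position_embeddings  # aliased to context_length via attribute_map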
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
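
# A minimal sketch of the verbosity API these tests exercise (public
# transformers.logging helpers only, no assumptions beyond them):
#
#     from transformers import logging
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("visible at INFO verbosity and above")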
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"],
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
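
# A minimal usage sketch (assumes the public facebook/m2m100_418M checkpoint
# and network access; not part of the module above):
#
#     from transformers import M2M100Tokenizer
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # source text is encoded as: __en__ <tokens> </s>  (see set_src_lang_special_tokens)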
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
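
# A minimal usage sketch of the img2img pipeline exercised above (assumed:
# the public kandinsky-community checkpoints, a CUDA device, and an
# `init_image` PIL image supplied by the caller):
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior").to("cuda")
#     pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1").to("cuda")
#     image_emb, negative_emb = prior("A red cartoon frog, 4k").to_tuple()
#     image = pipe("A red cartoon frog, 4k", image=init_image, image_embeds=image_emb,
#                  negative_image_embeds=negative_emb, strength=0.2).images[0]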
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def snake_case_ ( A_ : int, A_ : int, A_ : int, A_ : int, A_ : int, A_ : int ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCamelCase : Union[str, Any] = ksize + 1
_lowerCamelCase : List[Any] = np.zeros((ksize, ksize), dtype=np.floataa )
# each value
for y in range(A_ ):
for x in range(A_ ):
# distance from center
_lowerCamelCase : Dict = x - ksize // 2
_lowerCamelCase : int = y - ksize // 2
# degree to radiant
_lowerCamelCase : int = theta / 1_80 * np.pi
_lowerCamelCase : Dict = np.cos(_theta )
_lowerCamelCase : int = np.sin(_theta )
# get kernel x
_lowerCamelCase : Optional[Any] = cos_theta * px + sin_theta * py
# get kernel y
_lowerCamelCase : int = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCamelCase : str = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowerCAmelCase__ = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
lowerCAmelCase__ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowerCAmelCase__ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
lowerCAmelCase__ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowerCAmelCase__ = out / out.max() * 255
lowerCAmelCase__ = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
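
# A quick self-check sketch for gabor_filter_kernel (needs no image files,
# unlike the demo above); the expectation follows directly from the code,
# which bumps an even ksize to the next odd number:
#
#     kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
#     assert kernel.shape == (11, 11)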
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCAmelCase__ = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCAmelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = cn.convert_to_negative(A_ )
# assert negative_img array for at least one True
assert negative_img.any()
def snake_case_ ( ):
'''simple docstring'''
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(A_, 1_10 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = imread('''digital_image_processing/image_data/lena_small.jpg''', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCamelCase : Tuple = canny.canny(A_ )
# assert canny array for at least one True
assert canny_array.any()
def snake_case_ ( ):
'''simple docstring'''
assert gg.gaussian_filter(A_, 5, sigma=0.9 ).all()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_lowerCamelCase : Optional[int] = conv.img_convolve(A_, A_ ).astype(A_ )
assert res.any()
def snake_case_ ( ):
'''simple docstring'''
assert med.median_filter(A_, 3 ).any()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : int = sob.sobel_filter(A_ )
assert grad.any() and theta.any()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = sp.make_sepia(A_, 20 )
assert sepia.all()
def snake_case_ ( A_ : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
_lowerCamelCase : Tuple = bs.Burkes(imread(A_, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def snake_case_ ( A_ : str = "digital_image_processing/image_data/lena_small.jpg", ):
'''simple docstring'''
_lowerCamelCase : str = rs.NearestNeighbour(imread(A_, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
_lowerCamelCase : Optional[int] = imread(A_, 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : Optional[int] = image[x_coordinate][y_coordinate]
_lowerCamelCase : List[Any] = lbp.get_neighbors_pixel(
A_, A_, A_, A_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCamelCase : Any = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
_lowerCamelCase : Union[str, Any] = lbp.local_binary_value(A_, A_, A_ )
assert lbp_image.any()
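
# These tests are written for pytest; a typical invocation (assumed, from the
# repository root that contains digital_image_processing/):
#
#     python -m pytest digital_image_processing/test_digital_image_processing.py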
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__: List[str] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__magic_name__: List[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__magic_name__: Union[str, Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__magic_name__: Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6_0_0_0,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__magic_name__: int = tempfile.mkdtemp()
__magic_name__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__: Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
# load decoder from hub
__magic_name__: Dict = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCamelCase__ ( self : Any , **__snake_case : str ) -> Optional[int]:
__magic_name__: Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : str , **__snake_case : int ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : int , **__snake_case : List[str] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = self.get_tokenizer()
__magic_name__: Any = self.get_feature_extractor()
__magic_name__: Tuple = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
__magic_name__: Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__: int = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: List[Any] = self.get_decoder()
__magic_name__: int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Tuple = floats_list((3, 1_0_0_0) )
__magic_name__: List[str] = feature_extractor(__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = processor(__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = self.get_feature_extractor()
__magic_name__: List[str] = self.get_tokenizer()
__magic_name__: str = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = """This is a test string"""
__magic_name__: List[str] = processor(text=__snake_case )
__magic_name__: Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=(2, 1_0, 1_6) , __snake_case : List[Any]=7_7 ) -> Dict:
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Tuple = self.get_tokenizer()
__magic_name__: Any = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__magic_name__: str = processor.decode(__snake_case )
__magic_name__: Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCamelCase__ ( self : int , __snake_case : Dict ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: List[Any] = self.get_tokenizer()
__magic_name__: int = self.get_decoder()
__magic_name__: Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__: Optional[int] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
__magic_name__: Any = processor.batch_decode(__snake_case , __snake_case )
__magic_name__: Dict = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
__magic_name__: List[str] = decoder.decode_beams_batch(__snake_case , __snake_case )
__magic_name__, __magic_name__, __magic_name__: Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[str] = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: Optional[int] = self.get_decoder()
__magic_name__: Dict = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: str = self._get_dummy_logits()
__magic_name__: Dict = 1_5
__magic_name__: int = -20.0
__magic_name__: int = -4.0
__magic_name__: Dict = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Optional[int] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Any = [d[0][0] for d in decoded_decoder_out]
__magic_name__: Optional[int] = [d[0][2] for d in decoded_decoder_out]
__magic_name__: Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Union[str, Any] = self.get_decoder()
__magic_name__: str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Any = self._get_dummy_logits()
__magic_name__: Union[str, Any] = 2.0
__magic_name__: Optional[Any] = 5.0
__magic_name__: Optional[Any] = -20.0
__magic_name__: List[str] = True
__magic_name__: List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
__magic_name__: Union[str, Any] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , )
__magic_name__: List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case )
__magic_name__: List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: Optional[int] = os.listdir(__snake_case )
__magic_name__: Union[str, Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
__magic_name__: Any = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: str = os.listdir(__snake_case )
__magic_name__: Tuple = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = floats_list((3, 1_0_0_0) )
__magic_name__: Tuple = processor_wavaveca(__snake_case , return_tensors="""np""" )
__magic_name__: Optional[Any] = processor_auto(__snake_case , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__: int = self._get_dummy_logits()
__magic_name__: List[Any] = processor_wavaveca.batch_decode(__snake_case )
__magic_name__: Union[str, Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Dict = self.get_decoder()
__magic_name__: List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCamelCase__ ( __snake_case : Optional[int] , __snake_case : int ) -> int:
__magic_name__: Any = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
__magic_name__: Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Tuple = self._get_dummy_logits()[0]
__magic_name__: List[Any] = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Optional[int] = self._get_dummy_logits()
__magic_name__: Any = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
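
# --- Illustrative sketch (module-level helper, not part of the test class):
# the slow test converts CTC word offsets (logit-frame indices) into seconds
# via `inputs_to_logits_ratio / sampling_rate`. The defaults below (320 and
# 16_000, the usual Wav2Vec2 values) are assumptions, not read from a config.
def offsets_to_times(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16_000):
    seconds_per_frame = inputs_to_logits_ratio / sampling_rate  # 0.02 s per logit frame
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * seconds_per_frame,
            "end_time": d["end_offset"] * seconds_per_frame,
        }
        for d in word_offsets
    ]

# offsets_to_times([{"word": "hi", "start_offset": 71, "end_offset": 77}])
# -> start_time ~= 1.42 s, end_time ~= 1.54 s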
| 96
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
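
# --- Illustrative sketch (assumption: the standard EfficientNet channel
# rounding from the paper; this helper is not part of the config file above).
# `width_coefficient` scales channel counts, and `depth_divisor` keeps the
# result divisible by a fixed value so the scaled ops stay hardware friendly.
def round_filters(num_channels: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    scaled = num_channels * width_coefficient
    new_channels = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    # never shrink by more than 10%
    if new_channels < 0.9 * scaled:
        new_channels += depth_divisor
    return int(new_channels)

# e.g. round_filters(32, width_coefficient=2.0) == 64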
| 96
| 1
|
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # a slot lazily becomes a deque so collided values are chained
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
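
# --- Illustrative sketch of the chaining idea in isolation, independent of
# the HashTable base class (which lives in the sibling `hash_table` module).
def chain_insert(buckets: dict, slot: int, value) -> None:
    # a slot lazily becomes a deque, so colliding values are chained
    # newest-first instead of overwriting one another
    if buckets.get(slot) is None:
        buckets[slot] = deque()
    buckets[slot].appendleft(value)

_demo_buckets: dict = {}
chain_insert(_demo_buckets, 2, 17)  # e.g. key 17 hashes to slot 17 % 5 == 2
chain_insert(_demo_buckets, 2, 22)  # collision: 22 % 5 == 2 as well
assert list(_demo_buckets[2]) == [22, 17]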
| 281
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """You can also import this e.g. from .test_modeling_funnel import FunnelModelTester"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
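
# --- Illustrative sketch (plain arithmetic, no TensorFlow needed): how the
# tester derives the number of hidden states it expects from `block_sizes`.
_block_sizes = [1, 1, 2]
_num_decoder_layers = 1
_num_hidden_layers = sum(_block_sizes) + _num_decoder_layers  # 5 for the full (non-base) model
_expected_num_hidden_states = _num_hidden_layers + 2          # + input embeddings and the upsampled decoder input
assert _expected_num_hidden_states == 7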
| 281
| 1
|
"""simple docstring"""
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def decimal_to_hexadecimal(decimal):
    """Convert a decimal integer (or integral float) to a hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
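
# --- Usage sketch: spot checks against Python's built-in hex().
assert decimal_to_hexadecimal(5) == "0x5"
assert decimal_to_hexadecimal(540) == "0x21c"
assert decimal_to_hexadecimal(-256) == "-0x100"
assert decimal_to_hexadecimal(316) == hex(316)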
| 528
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 1_2,
"430M": 2_4,
"1B5": 2_4,
"3B": 3_2,
"7B": 3_2,
"14B": 4_0,
}
HIDDEN_SIZE_MAPPING = {
"169M": 7_6_8,
"430M": 1_0_2_4,
"1B5": 2_0_4_8,
"3B": 2_5_6_0,
"7B": 4_0_9_6,
"14B": 5_1_2_0,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
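
# --- Illustrative sketch: the renaming rules above applied to a tiny fake
# checkpoint (no download needed). The keys follow the RWKV naming the script
# expects; the tensors are just placeholders.
def _demo_convert_state_dict():
    fake = {
        "emb.weight": torch.zeros(2, 2),
        "blocks.0.att.time_mix_k": torch.zeros(2),
        "blocks.3.ffn.key.weight": torch.zeros(2, 2),
        "head.weight": torch.zeros(2, 2),
    }
    converted = convert_state_dict(fake)
    assert "rwkv.embeddings.weight" in converted
    assert "rwkv.blocks.0.attention.time_mix_key" in converted
    assert "rwkv.blocks.3.feed_forward.key.weight" in converted
    assert "head.weight" in converted  # only the LM head keeps its original name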
| 429
| 0
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
    )
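
    # --- Usage sketch: a two-level MLFQ (one round-robin queue with a
    # time slice of 2, then FCFS) over two short jobs.
    example_queue = deque([Process("A", 0, 3), Process("B", 0, 1)])
    small_mlfq = MLFQ(number_of_queues=2, time_slices=[2], queue=example_queue, current_time=0)
    finish_order = [p.process_name for p in small_mlfq.multi_level_feedback_queue()]
    # "A" burns its 2-unit slice and is demoted; "B" finishes; "A" finishes under FCFS.
    assert finish_order == ["B", "A"]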
| 713
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
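
# --- Illustrative sketch (standalone; `_TinyLazyModule` is a hypothetical
# reduction, not the real `_LazyModule`): the core idea is replacing the entry
# in `sys.modules` with an object that resolves attributes on first access, so
# importing the package stays cheap until a symbol is actually used.
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that actually defines it
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        import importlib

        submodule = self._name_to_submodule[name]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, name)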
| 236
| 0
|
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
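
# --- Worked micro-example of the same idea, using the first two of the
# fifty-digit numbers from the puzzle (the real input has one hundred):
_sample = [
    "37107287533902102798797998220837590246510135740250",
    "46376937677490009712648124896970078050417018260538",
]
assert str(sum(int(line) for line in _sample))[:10] == "8348422521"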
| 40
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
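
# --- Usage sketch: exercise set_timesteps/step with a dummy zero-noise
# "model output" (a real pipeline would call a UNet instead). Purely
# illustrative; it only checks that the inversion loop runs.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(noise_pred, int(t), sample).prev_sample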
| 493
| 0
|
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('''The input value cannot be less than 2''' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
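
# --- Usage sketch: factor a small semiprime directly (no CLI needed);
# 8051 = 83 * 97 is the classic worked example for Pollard's rho.
_divisor = pollard_rho(8051)
assert _divisor is not None and 8051 % _divisor == 0  # expect 83 or 97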
| 416
|
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # forward the `audio` argument to the feature extractor and the
        # `text` argument to the tokenizer
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
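
# --- Usage sketch (requires downloading a checkpoint, so kept as a comment;
# "openai/whisper-tiny" is one public option). A single call routes audio to
# the feature extractor and text to the tokenizer, and when both are given
# the tokenized text comes back under `labels`:
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   batch = processor(audio=audio_array, sampling_rate=16_000, text="hello world")
#   batch["input_features"], batch["labels"]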
| 416
| 1
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __lowerCamelCase ( self : int , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : int=False , ) -> Any:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
SCREAMING_SNAKE_CASE__ :str = input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE__ :Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
SCREAMING_SNAKE_CASE__ :Dict = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE__ :Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
SCREAMING_SNAKE_CASE__ :Dict = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE__ :torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = encoder_hidden_states.size()
SCREAMING_SNAKE_CASE__ :Optional[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.invert_attention_mask(UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ :str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE__ :Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE__ :List[str] = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = embedding_output
if self.training:
SCREAMING_SNAKE_CASE__ :str = []
for i in range(self.config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ :Dict = self.encoder.adaptive_forward(
UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[int] = self.pooler(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = output_layers[i](output_dropout(UpperCamelCase_ ) )
res.append(UpperCamelCase_ )
elif self.patience == 0: # Use all layers for inference
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ :Dict = self.pooler(encoder_outputs[0] )
SCREAMING_SNAKE_CASE__ :Any = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )]
else:
SCREAMING_SNAKE_CASE__ :Any = 0
SCREAMING_SNAKE_CASE__ :Optional[int] = None
SCREAMING_SNAKE_CASE__ :Optional[int] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
SCREAMING_SNAKE_CASE__ :str = self.encoder.adaptive_forward(
UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[int] = self.pooler(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Any = output_layers[i](UpperCamelCase_ )
if regression:
SCREAMING_SNAKE_CASE__ :List[Any] = logits.detach()
if patient_result is not None:
SCREAMING_SNAKE_CASE__ :Optional[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 0
else:
SCREAMING_SNAKE_CASE__ :Optional[int] = logits.detach().argmax(dim=1 )
if patient_result is not None:
SCREAMING_SNAKE_CASE__ :Dict = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ):
patient_counter += 1
else:
SCREAMING_SNAKE_CASE__ :Dict = 0
SCREAMING_SNAKE_CASE__ :List[str] = logits
if patient_counter == self.patience:
break
SCREAMING_SNAKE_CASE__ :List[Any] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
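# A minimal, self-contained sketch (not the model code above) of the patience
# rule used in the inference branch: the loop exits once `patience` consecutive
# internal classifiers agree with the previous layer's prediction.
def simulate_patience_exit(per_layer_predictions, patience):
    """Return the 1-based layer index at which PABEE-style inference stops."""
    patient_counter = 0
    previous = None
    for layer_idx, prediction in enumerate(per_layer_predictions, start=1):
        if previous is not None and prediction == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            return layer_idx
    return len(per_layer_predictions)  # no early exit: all layers used
# e.g. with patience=2 inference exits at layer 4, after two repeats of layer 2's label
assert simulate_patience_exit([0, 1, 1, 1, 0, 1], patience=2) == 4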
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , _SCREAMING_SNAKE_CASE , )
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ):
def __init__( self : List[str] , UpperCamelCase_ : int ) -> Optional[Any]:
super().__init__(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = config.num_labels
SCREAMING_SNAKE_CASE__ :Optional[int] = BertModelWithPabee(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE__ :str = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __lowerCamelCase ( self : Any , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ :Any = self.bert(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
SCREAMING_SNAKE_CASE__ :Optional[int] = (logits[-1],)
if labels is not None:
SCREAMING_SNAKE_CASE__ :Tuple = None
SCREAMING_SNAKE_CASE__ :Dict = 0
for ix, logits_item in enumerate(UpperCamelCase_ ):
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE__ :List[Any] = MSELoss()
SCREAMING_SNAKE_CASE__ :Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE__ :List[str] = CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ :int = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
SCREAMING_SNAKE_CASE__ :List[str] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
SCREAMING_SNAKE_CASE__ :Any = (total_loss / total_weights,) + outputs
return outputs
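# A small sketch (an assumption matching the loss loop above, not library code)
# of the layer-weighted training loss: classifier ix gets weight ix + 1, so the
# deeper classifiers dominate the weighted average.
def weighted_pabee_loss(per_layer_losses):
    total_loss, total_weights = 0.0, 0
    for ix, loss in enumerate(per_layer_losses):
        total_loss += loss * (ix + 1)
        total_weights += ix + 1
    return total_loss / total_weights
# e.g. losses [3.0, 2.0, 1.0] -> (3*1 + 2*2 + 1*3) / (1 + 2 + 3) = 10 / 6
assert abs(weighted_pabee_loss([3.0, 2.0, 1.0]) - 10 / 6) < 1e-9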
| 209
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowerCamelCase ( UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCAmelCase__ ) )
] # the closed (visited) grid
SCREAMING_SNAKE_CASE__ :Any = 1
SCREAMING_SNAKE_CASE__ :Dict = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCAmelCase__ ) )
] # the action grid
SCREAMING_SNAKE_CASE__ :int = init[0]
SCREAMING_SNAKE_CASE__ :Optional[Any] = init[1]
SCREAMING_SNAKE_CASE__ :List[str] = 0
SCREAMING_SNAKE_CASE__ :List[Any] = g + heuristic[x][y] # estimated total cost: path cost so far plus heuristic to the goal
SCREAMING_SNAKE_CASE__ :List[Any] = [[f, g, x, y]]
SCREAMING_SNAKE_CASE__ :Any = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE__ :str = False # flag set when we cannot expand any further
while not found and not resign:
if len(UpperCAmelCase__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE__ :List[Any] = cell.pop()
SCREAMING_SNAKE_CASE__ :Optional[int] = next_cell[2]
SCREAMING_SNAKE_CASE__ :Any = next_cell[3]
SCREAMING_SNAKE_CASE__ :Dict = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE__ :Tuple = True
else:
for i in range(len(UpperCAmelCase__ ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE__ :Optional[int] = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE__ :int = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCAmelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE__ :str = g + cost
SCREAMING_SNAKE_CASE__ :Union[str, Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 1
SCREAMING_SNAKE_CASE__ :Any = i
SCREAMING_SNAKE_CASE__ :int = []
SCREAMING_SNAKE_CASE__ :Union[str, Any] = goal[0]
SCREAMING_SNAKE_CASE__ :Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE__ :Optional[Any] = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE__ :Optional[Any] = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE__ :Optional[int] = xa
SCREAMING_SNAKE_CASE__ :int = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE__ :int = []
for i in range(len(UpperCAmelCase__ ) ):
path.append(invpath[len(UpperCAmelCase__ ) - 1 - i] )
return path, action
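# A minimal helper (a sketch mirroring the inline loop in the __main__ block
# below) for the Manhattan-distance heuristic consumed by `search`; obstacle
# cells get a large penalty so expansion never prefers them.
def manhattan_heuristic(grid, goal, obstacle_penalty=99):
    heuristic = [
        [abs(i - goal[0]) + abs(j - goal[1]) for j in range(len(grid[0]))]
        for i in range(len(grid))
    ]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if grid[i][j] == 1:
                heuristic[i][j] = obstacle_penalty
    return heuristic
assert manhattan_heuristic([[0, 1], [0, 0]], goal=[1, 1]) == [[2, 99], [1, 0]]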
if __name__ == "__main__":
UpperCamelCase_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase_ = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase_ = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase_ = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase_ = 99
UpperCamelCase_ , UpperCamelCase_ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 209
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = 1_0
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = [1, 2, 3, 4]
__SCREAMING_SNAKE_CASE : Any = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
__SCREAMING_SNAKE_CASE : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
__SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
__SCREAMING_SNAKE_CASE : Any = process_story(__A )
self.assertEqual(__A , [] )
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = ""
__SCREAMING_SNAKE_CASE : Tuple = process_story(__A )
self.assertEqual(__A , [] )
self.assertEqual(__A , [] )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
__SCREAMING_SNAKE_CASE : Tuple = process_story(__A )
__SCREAMING_SNAKE_CASE : Optional[Any] = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__A , __A )
__SCREAMING_SNAKE_CASE : Optional[Any] = ["It was the best of times."]
self.assertEqual(__A , __A )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3, 4] )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__A , 0 ).numpy() , expected.numpy() )
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__A , 2_3 ).numpy() , expected.numpy() )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__A , 1 ).numpy() , expected.numpy() )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1_0_1
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = compute_token_type_ids(__A , __A )
np.testing.assert_array_equal(__A , __A )
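# Reference sketches (behaviour inferred from the assertions above, not the
# actual `utils_summarization` implementations) of two of the helpers under test.
import torch
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    """Truncate to `block_size`, or right-pad with `pad_token_id`."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))
def build_mask_sketch(sequence, pad_token_id):
    """1 for real tokens, 0 from the first pad token onward."""
    mask = torch.ones_like(sequence)
    pad_positions = (sequence == pad_token_id).nonzero()
    if pad_positions.numel() > 0:
        mask[pad_positions[0].item() :] = 0
    return mask
assert truncate_or_pad_sketch([1, 2, 3, 4], 6, 0) == [1, 2, 3, 4, 0, 0]
assert build_mask_sketch(torch.tensor([1, 2, 23, 23]), 23).tolist() == [1, 1, 0, 0]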
| 703
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def lowerCAmelCase_ ( _lowerCamelCase: Tuple ):
__SCREAMING_SNAKE_CASE : List[Any] = SwinvaConfig()
__SCREAMING_SNAKE_CASE : List[Any] = swinva_name.split("""_""" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = name_split[1]
if "to" in name_split[3]:
__SCREAMING_SNAKE_CASE : Dict = int(name_split[3][-3:] )
else:
__SCREAMING_SNAKE_CASE : str = int(name_split[3] )
if "to" in name_split[2]:
__SCREAMING_SNAKE_CASE : Optional[Any] = int(name_split[2][-2:] )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = int(name_split[2][6:] )
if model_size == "tiny":
__SCREAMING_SNAKE_CASE : Dict = 96
__SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
__SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif model_size == "small":
__SCREAMING_SNAKE_CASE : List[str] = 96
__SCREAMING_SNAKE_CASE : int = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif model_size == "base":
__SCREAMING_SNAKE_CASE : int = 1_28
__SCREAMING_SNAKE_CASE : str = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : Optional[int] = (4, 8, 16, 32)
else:
__SCREAMING_SNAKE_CASE : List[str] = 1_92
__SCREAMING_SNAKE_CASE : Union[str, Any] = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : Dict = (6, 12, 24, 48)
if "to" in swinva_name:
__SCREAMING_SNAKE_CASE : int = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__SCREAMING_SNAKE_CASE : int = 2_18_41
__SCREAMING_SNAKE_CASE : str = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE : List[str] = """imagenet-22k-id2label.json"""
__SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Optional[int] = idalabel
__SCREAMING_SNAKE_CASE : str = {v: k for k, v in idalabel.items()}
else:
__SCREAMING_SNAKE_CASE : str = 10_00
__SCREAMING_SNAKE_CASE : Optional[int] = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE : Any = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE : int = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Optional[int] = idalabel
__SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Any = img_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_classes
__SCREAMING_SNAKE_CASE : int = embed_dim
__SCREAMING_SNAKE_CASE : Dict = depths
__SCREAMING_SNAKE_CASE : str = num_heads
__SCREAMING_SNAKE_CASE : int = window_size
return config
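# A quick parse sketch of the branching above (the model name here is
# illustrative, not guaranteed to exist in timm): "to"-style fields encode a
# pretrain-to-finetune size change, and only the trailing digits are kept.
name_split = "swinv2_base_window12to16_192to256_22kft1k".split("_")
assert name_split[1] == "base"  # model_size
assert int(name_split[2][-2:]) == 16  # window_size, from "window12to16"
assert int(name_split[3][-3:]) == 256  # img_size, from "192to256"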
def lowerCAmelCase_ ( _lowerCamelCase: int ):
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = """encoder.""" + name
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
__SCREAMING_SNAKE_CASE : str = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
__SCREAMING_SNAKE_CASE : Tuple = """layernorm.weight"""
if name == "norm.bias":
__SCREAMING_SNAKE_CASE : Optional[int] = """layernorm.bias"""
if "head" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""head""" , """classifier""" )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = """swinv2.""" + name
return name
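# Illustrative trace (a sketch; the key is a typical timm state-dict name) of
# the replacements above applied to one non-head key:
key = "layers.0.blocks.0.attn.proj.weight"
key = "encoder." + key  # "layers" is in the name
key = key.replace("attn.proj", "attention.output.dense")
key = "swinv2." + key  # not a classification head, so prefix with the model name
assert key == "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight"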
def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: Optional[Any] ):
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : Optional[Any] = orig_state_dict.pop(_lowerCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = key.split(""".""" )
__SCREAMING_SNAKE_CASE : List[str] = int(key_split[1] )
__SCREAMING_SNAKE_CASE : Dict = int(key_split[3] )
__SCREAMING_SNAKE_CASE : str = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Optional[int] = val[:dim, :]
__SCREAMING_SNAKE_CASE : str = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = val[:dim]
__SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = val
return orig_state_dict
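# A minimal sketch (shapes assumed) of the qkv split performed above: timm
# fuses query/key/value along the first axis, so a (3 * dim, dim) weight is
# cut into three (dim, dim) blocks.
import torch
dim = 4
qkv_weight = torch.randn(3 * dim, dim)
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)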
def lowerCAmelCase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: int ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
__SCREAMING_SNAKE_CASE : int = get_swinva_config(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = SwinvaForImageClassification(_lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = convert_state_dict(timm_model.state_dict() , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE : int = timm_model(inputs["""pixel_values"""] )
__SCREAMING_SNAKE_CASE : Dict = model(**_lowerCamelCase ).logits
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 )
print(F"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 178
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 687
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ):
"""simple docstring"""
if isinstance(__magic_name__ , torch.Tensor ):
return image
elif isinstance(__magic_name__ , PIL.Image.Image ):
_lowerCAmelCase :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 )
_lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0
_lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase :int = 2.0 * image - 1.0
_lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 )
return image
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ):
"""simple docstring"""
if not isinstance(__magic_name__ , np.ndarray ):
_lowerCAmelCase :Tuple = True
_lowerCAmelCase :str = va.device
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :List[str] = va.cpu().numpy()
_lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) )
if np.abs(__magic_name__ ) > DOT_THRESHOLD:
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va
else:
_lowerCAmelCase :int = np.arccos(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ )
_lowerCAmelCase :Union[str, Any] = theta_a * t
_lowerCAmelCase :str = np.sin(__magic_name__ )
_lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase :List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
return va
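# A quick numeric sketch of the spherical interpolation above: for orthogonal
# unit vectors, slerp at any t stays on the unit sphere (a plain lerp would
# not). Pure numpy; the torch round-trip above is only device bookkeeping.
import numpy as np
va, vb, t = np.array([1.0, 0.0]), np.array([0.0, 1.0]), 0.5
theta = np.arccos(np.clip(np.dot(va, vb), -1.0, 1.0))
interp = (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)
assert abs(np.linalg.norm(interp) - 1.0) < 1e-9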
def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 )
_lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
for param in model.parameters():
_lowerCAmelCase :List[str] = value
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , )
_lowerCAmelCase :int = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCAmelCase )
set_requires_grad(self.clip_model , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
self.enable_attention_slicing(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
set_requires_grad(self.vae , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
set_requires_grad(self.unet , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ):
# get the original timestep using init_timestep
_lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
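# Worked example of the slice above (values illustrative): with
# num_inference_steps = 50 and strength = 0.6,
#   init_timestep = min(int(50 * 0.6), 50) = 30
#   t_start       = max(50 - 30, 0)        = 20
# so img2img denoising runs over the last 30 of the 50 scheduler timesteps.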
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" )
_lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 )
else:
_lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 )
_lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :List[str] = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ):
_lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ):
_lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Dict = image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ):
_lowerCAmelCase :Dict = latents.detach().requires_grad_()
_lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase )
_lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Dict = self.scheduler.sigmas[index]
_lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase )
_lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype )
_lowerCAmelCase :List[Any] = self.clip_model.get_image_features(_UpperCAmelCase )
_lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase )
_lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale
_lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0]
if isinstance(self.scheduler , _UpperCAmelCase ):
_lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowerCAmelCase :Dict = noise_pred_original
else:
_lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1:
_lowerCAmelCase :int = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase :List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase )
if style_prompt is None:
if len(_UpperCAmelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase )
# get prompt text embeddings for content and style
_lowerCAmelCase :Any = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
_lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# set timesteps
_lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase :Dict = {}
if accepts_offset:
_lowerCAmelCase :Optional[int] = 1
self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device )
_lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase )
# Preprocess image
_lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :int = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase )
_lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if clip_guidance_scale > 0:
_lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Any = slerp(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1]
_lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' )
_lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase :Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
_lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase :int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase :Any = {}
if accepts_eta:
_lowerCAmelCase :Any = eta
# check if the scheduler accepts generator
_lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase :List[Any] = generator
with self.progress_bar(total=_UpperCAmelCase ):
for i, t in enumerate(_UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
_lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 )
_lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase :List[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample
_lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
| 687
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase__ = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __A(lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return EnvironmentCommand()
class lowerCAmelCase__ ( __lowercase ):
@staticmethod
def A_ ( a ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parser.add_parser("""env""" )
download_parser.set_defaults(func=a )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = huggingface_hub.__version__
_UpperCamelCase = """not installed"""
_UpperCamelCase = """NA"""
if is_torch_available():
import torch
_UpperCamelCase = torch.__version__
_UpperCamelCase = torch.cuda.is_available()
_UpperCamelCase = """not installed"""
if is_transformers_available():
import transformers
_UpperCamelCase = transformers.__version__
_UpperCamelCase = """not installed"""
if is_accelerate_available():
import accelerate
_UpperCamelCase = accelerate.__version__
_UpperCamelCase = """not installed"""
if is_xformers_available():
import xformers
_UpperCamelCase = xformers.__version__
_UpperCamelCase = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(a ) )
return info
@staticmethod
def A_ ( a ) -> Dict:
'''simple docstring'''
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 202
| 1
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
__lowerCAmelCase : Optional[Any] ="""us-east-1""" # defaults region
@dataclass
class _A :
snake_case__ : str
snake_case__ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
snake_case__ : List[str] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5500,
}
snake_case__ : Optional[Any] = {**hyperparameters, 'max_steps': 1000}
@property
def A__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def A__ ( self ):
"""simple docstring"""
return f'{self.framework}-transformers-test'
@property
def A__ ( self ):
"""simple docstring"""
return f'./tests/sagemaker/scripts/{self.framework}'
@property
def A__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> Dict:
'''simple docstring'''
lowercase = SageMakerTestEnvironment(framework=request.cls.framework )
| 359
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : List[str] =[
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _A ( unittest.TestCase ):
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = None
lowercase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
lowercase = os.path.abspath("""examples""" )
for item in os.listdir(__lowerCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
lowercase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=__lowerCAmelCase , feature_script=__lowerCAmelCase , tested_section="""main()""" if parser_only else """training_function()""" , ):
lowercase = compare_against_test(
os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase = """\n""".join(__lowerCAmelCase )
if special_strings is not None:
for string in special_strings:
lowercase = diff.replace(__lowerCAmelCase , """""" )
self.assertEqual(__lowerCAmelCase , """""" )
def A__ ( self ):
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , __lowerCAmelCase )
self.one_complete_example("""complete_nlp_example.py""" , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
lowercase = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.one_complete_example("""complete_cv_example.py""" , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _A ( lowerCAmelCase ):
snake_case__ : Any = False
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().setUpClass()
lowercase = tempfile.mkdtemp()
lowercase = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
lowercase = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def A__ ( self ):
"""simple docstring"""
lowercase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
lowercase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
lowercase = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
self.assertNotIn("""epoch 0:""" , __lowerCAmelCase )
self.assertIn("""epoch 1:""" , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
lowercase = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
if torch.cuda.is_available():
lowercase = torch.cuda.device_count()
else:
lowercase = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , __lowerCAmelCase )
self.assertIn("""epoch 1:""" , __lowerCAmelCase )
else:
self.assertIn("""epoch 0:""" , __lowerCAmelCase )
self.assertIn("""epoch 1:""" , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
lowercase = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
lowercase = re.findall("""({.+})""" , __lowerCAmelCase )
lowercase = [r for r in results if """accuracy""" in r][-1]
lowercase = ast.literal_eval(__lowerCAmelCase )
self.assertGreaterEqual(results["""accuracy"""] , 0.7_5 )
def A__ ( self ):
"""simple docstring"""
lowercase = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
lowercase = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , """tracking""" ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def A__ ( self ):
"""simple docstring"""
lowercase = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 359
| 1
|
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowerCAmelCase__ = load_file(UpperCAmelCase__ )
lowerCAmelCase__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key; it usually looks like the example below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# the alpha has already been applied beforehand, so just skip these entries
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowerCAmelCase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowerCAmelCase__ = pipeline.text_encoder
else:
lowerCAmelCase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowerCAmelCase__ = pipeline.unet
# find the target layer
lowerCAmelCase__ = layer_infos.pop(0 )
while len(UpperCAmelCase__ ) > -1:
try:
lowerCAmelCase__ = curr_layer.__getattr__(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
lowerCAmelCase__ = layer_infos.pop(0 )
elif len(UpperCAmelCase__ ) == 0:
break
except Exception:
if len(UpperCAmelCase__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowerCAmelCase__ = layer_infos.pop(0 )
lowerCAmelCase__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(UpperCAmelCase__ )
else:
pair_keys.append(UpperCAmelCase__ )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowerCAmelCase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowerCAmelCase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowerCAmelCase__ = state_dict[pair_keys[0]].to(torch.floataa )
lowerCAmelCase__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ )
# update visited list
for item in pair_keys:
visited.append(UpperCAmelCase__ )
return pipeline
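# A minimal sketch (shapes and alpha illustrative) of the merge performed
# above for linear layers: the frozen weight W0 is updated in place with the
# low-rank product, W = W0 + alpha * (up @ down).
import torch
d_out, d_in, rank, alpha = 8, 8, 2, 0.75
weight = torch.zeros(d_out, d_in)
lora_up = torch.randn(d_out, rank)
lora_down = torch.randn(rank, d_in)
weight += alpha * torch.mm(lora_up, lora_down)
assert torch.linalg.matrix_rank(weight) <= rank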
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : Tuple = args.base_model_path
__lowerCAmelCase : Dict = args.checkpoint_path
__lowerCAmelCase : Union[str, Any] = args.dump_path
__lowerCAmelCase : Dict = args.lora_prefix_unet
__lowerCAmelCase : Tuple = args.lora_prefix_text_encoder
__lowerCAmelCase : List[str] = args.alpha
__lowerCAmelCase : Dict = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__lowerCAmelCase : str = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 708
|
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self : Optional[int] ):
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = 256
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
lowerCAmelCase__ = cva.imread(snake_case__ , 0 )
lowerCAmelCase__ = copy.deepcopy(self.img )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
lowerCAmelCase__ = np.sum(snake_case__ )
for i in range(len(snake_case__ ) ):
lowerCAmelCase__ = x[i] / self.k
self.sk += prk
lowerCAmelCase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase__ = int(last % last )
lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case__ )
lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase__ = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase__ = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
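# A small sketch of the mapping built in `stretch` above: each output level is
# (L - 1) times the cumulative histogram probability, rounded. Toy 4-pixel
# image with L = 4 grey levels (values illustrative).
import numpy as np
image = np.array([0, 0, 1, 3])
levels = 4
hist = np.bincount(image, minlength=levels) / image.size  # [0.5, 0.25, 0.0, 0.25]
cdf = np.cumsum(hist)  # [0.5, 0.75, 0.75, 1.0]
mapping = np.rint((levels - 1) * cdf).astype(int)
assert mapping.tolist() == [2, 2, 2, 3]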
if __name__ == "__main__":
__lowerCAmelCase : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
__lowerCAmelCase : Optional[int] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 674
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """simple docstring"""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 23
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]) -> List:
    '''simple docstring'''
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input')
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png').resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F'Invalid type requested: {input_type}')
    return inputs


def output_types(outputs: List) -> List[str]:
    '''simple docstring'''
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append('text')
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append('image')
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append('audio')
        else:
            raise ValueError(F'Invalid output: {output}')
    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, 'inputs'))
        self.assertTrue(hasattr(self.tool, 'outputs'))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, 'description'))
        self.assertTrue(hasattr(self.tool, 'default_checkpoint'))
        self.assertTrue(self.tool.description.startswith('This is a tool that'))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 510
| 0
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        """simple docstring"""
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        """simple docstring"""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPTaTokenizer, GPTaTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        """simple docstring"""
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('./tests/fixtures/vocab.txt', os.path.join(tmp_dir, 'vocab.txt'))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='bert', use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('./tests/fixtures/vocab.json', os.path.join(tmp_dir, 'vocab.json'))
            shutil.copy('./tests/fixtures/merges.txt', os.path.join(tmp_dir, 'merges.txt'))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='gpt2', use_fast=False)
            self.assertIsInstance(tokenizer, GPTaTokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('./tests/fixtures/vocab.txt', os.path.join(tmp_dir, 'vocab.txt'))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='bert')
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('./tests/fixtures/vocab.json', os.path.join(tmp_dir, 'vocab.json'))
            shutil.copy('./tests/fixtures/merges.txt', os.path.join(tmp_dir, 'merges.txt'))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='gpt2')
            self.assertIsInstance(tokenizer, GPTaTokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        """simple docstring"""
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained('./', tokenizer_type='xxx')
@require_tokenizers
    def test_do_lower_case_attribute(self):
        """simple docstring"""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)
@require_tokenizers
    def test_from_pretrained_non_existent_model(self):
        """simple docstring"""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError, 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier', ):
                _ = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
    def test_tokenizer_class_from_name(self):
        """simple docstring"""
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
@require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        """simple docstring"""
        self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased', use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased'), BertTokenizerFast)
@require_tokenizers
    def test_do_lower_case_kwarg(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased', do_lower_case=False)
        sample = 'Hello, world. How are you?'
        tokens = tokenizer.tokenize(sample)
        self.assertEqual('[UNK]', tokens[0])

        tokenizer = AutoTokenizer.from_pretrained('microsoft/mpnet-base', do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual('[UNK]', tokens[0])
@require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 3_0000)
        self.assertEqual(tokenizer.unk_token, '[UNK]')
        self.assertEqual(tokenizer.padding_side, 'right')
        self.assertEqual(tokenizer.truncation_side, 'right')
    def test_auto_tokenizer_from_local_folder(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizera = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizera, tokenizer.__class__)
        self.assertEqual(tokenizera.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('ctrl')
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        """simple docstring"""
        config = get_tokenizer_config('bert-base-cased')
        config.pop('_commit_hash', None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {'do_lower_case': False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['tokenizer_class'], 'BertTokenizer')
    def test_new_tokenizer_registration(self):
        """simple docstring"""
        try:
            AutoConfig.register('custom', CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        """simple docstring"""
        try:
            AutoConfig.register('custom', CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        """simple docstring"""
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=False)

        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizerFast')
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=True, use_fast=False)
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizer')
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizer')
@require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        """simple docstring"""
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast')
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer', use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=False)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast')
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=False, use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=True)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast')
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=True, use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
            self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            'hf-internal-testing/test_dynamic_tokenizer_legacy', trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast')

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer_legacy', trust_remote_code=True, use_fast=False)
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
        else:
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
    def test_repo_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            _ = AutoTokenizer.from_pretrained('bert-base')

    def test_revision_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        """simple docstring"""
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
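# The registration tests above follow this minimal pattern (the class names
# below are illustrative stand-ins, not real Transformers classes):
#
# from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer
#
# class MyConfig(PretrainedConfig):
#     model_type = "my-model"
#
# class MyTokenizer(PreTrainedTokenizer):
#     pass
#
# AutoConfig.register("my-model", MyConfig)
# AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)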
| 557
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_SCREAMING_SNAKE_CASE = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_SCREAMING_SNAKE_CASE = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_SCREAMING_SNAKE_CASE = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
_SCREAMING_SNAKE_CASE = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    generation, population, target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
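# Quick sanity check of the primitives above (illustrative, not part of the
# original script): crossover preserves length and evaluate scores per position.
demo_child_1, demo_child_2 = crossover("AAAA", "BBBB")
assert len(demo_child_1) == len(demo_child_2) == 4
_, demo_score = evaluate(demo_child_1, "AAAA")
assert 0.0 <= demo_score <= 4.0  # one point per matching position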
| 557
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 92
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = f's3://{mock_bucket}'
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith('''s3://''') is False

    dataset_path = '''./local/path'''
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem('''file''')
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize('''compression_fs_class''', COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f'for \'{compression_fs_class.protocol}\' compression protocol, '
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)

    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex('''.''')]
    assert fs.glob('''*''') == [expected_filename]
    with fs.open(expected_filename, '''r''', encoding='''utf-8''') as f, open(text_file, encoding='''utf-8''') as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize('''protocol''', ['''zip''', '''gzip'''])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = f'{protocol}://{member_file_path}::{compressed_file_path}'
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile('''non_existing_''' + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob('''*''')) == [".gitattributes", "data"]
    assert hffs.isdir('''data''')
    assert hffs.isfile('''.gitattributes''') and hffs.isfile('''data/text_data.txt''')
    with open(text_file) as f:
        assert hffs.open('''data/text_data.txt''', '''r''').read() == f.read()


def test_fs_overwrites():
    protocol = '''bz2'''

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f'A filesystem protocol was already set for {protocol} and will be overwritten.'
    )
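# Usage sketch (not part of the test suite): the same chained-URL syntax the
# compression tests rely on can be driven directly; the paths are placeholders.
#
# import fsspec
# with fsspec.open("gzip://dataset.jsonl::/path/to/dataset.jsonl.gz", "rt") as f:
#     first_line = f.readline()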
| 443
| 0
|
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        '''simple docstring'''
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
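# Numeric sketch of the combination step above (illustrative, not part of the
# model): with mix_ratio = 0.5 the two per-transformer residuals are averaged
# before the skip connection is added back.
#
# import torch
# input_states = torch.zeros(1, 4)
# mixed = torch.ones(1, 4) * 0.5 + 3 * torch.ones(1, 4) * (1 - 0.5)  # -> 2.0 everywhere
# output = mixed + input_states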
| 715
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__A = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """simple docstring"""
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        # join the op names so the exception message is a single string
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""")
        print(*incompatible_ops, sep='\n')
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
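# Example invocation (hedged: the script path below is an assumption about
# where this file lives in the repo):
#
#   python utils/check_tf_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict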
| 167
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(
        self,
        transformer: TransformeraDModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(','):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = [label]

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.')

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == 'mps'
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
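# Usage sketch for the pipeline above (hedged: "facebook/DiT-XL-2-256" is the
# public DiT checkpoint; swap in any compatible repo id):
#
# import torch
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
# class_ids = pipe.get_label_ids(["golden retriever"])
# image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]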
| 459
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid function of a float (or its derivative if deriv=True)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("""Expected value: """))
_snake_case = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
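# Worked micro-example of one loop iteration above (illustrative numbers):
# with weight = 1.0 and expected = 30,
#   layer_1       = sigmoid_function(0.02 * 1.0)                     # ~0.505
#   layer_1_error = (30 / 100) - layer_1                             # ~-0.205
#   layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)  # uses the derivative
#   weight       += 0.02 * layer_1_delta                             # small correction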
| 655
| 0
|
"""simple docstring"""
from typing import Any
class Node:
    '''simple docstring'''
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    '''simple docstring'''
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=''' ''')
            temp = temp.next
        print()

    # adding nodes to the front of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes, leaving the links untouched
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
lowerCamelCase__ : Tuple = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 18
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    '''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        '''simple docstring'''
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 18
| 1
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 201
|
from PIL import Image
def mean_threshold(image: Image) -> Image:
    '''
    image: is a grayscale PIL image object
    '''
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
__lowerCAmelCase = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
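# Roughly equivalent vectorized sketch using numpy (illustrative alternative
# to the nested loops above, not part of the original script):
#
# import numpy as np
# arr = np.array(Image.open('path_to_image').convert('L'))
# out = Image.fromarray(np.where(arr > arr.mean(), 255, 0).astype('uint8'))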
| 201
| 1
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
A = int(os.environ["""LOCAL_RANK"""])
torch.cuda.set_device(local_rank)
A = torch.device("""cuda""", local_rank)
A = socket.gethostname()
A = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 487
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
lowercase_ = ConsistencyModelPipeline
lowercase_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase_ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def a_ ( self : Any , UpperCamelCase_ : int=False):
"""simple docstring"""
if class_cond:
__UpperCAmelCase : List[Any] = self.dummy_cond_unet
else:
__UpperCAmelCase : Optional[int] = self.dummy_uncond_unet
# Default to CM multistep sampler
__UpperCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
}
return components
def a_ ( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=0):
"""simple docstring"""
if str(UpperCamelCase_).startswith("mps"):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_)
else:
__UpperCAmelCase : Optional[Any] = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__UpperCAmelCase : List[Any] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : str = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : Any = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : str = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[int] = self.get_dummy_components(class_cond=UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : str = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : int = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[int] = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : int = None
__UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[str] = self.get_dummy_components(class_cond=UpperCamelCase_)
__UpperCAmelCase : Tuple = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : int = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Tuple = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Dict = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
def a_ ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : int="cpu" , UpperCamelCase_ : Any=torch.floataa , UpperCamelCase_ : List[str]=(1, 3, 64, 64)):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = torch.manual_seed(UpperCamelCase_)
__UpperCAmelCase : int = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
__UpperCAmelCase : int = self.get_fixed_latents(seed=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ , shape=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = latents
return inputs
def a_ ( self : Union[str, Any] , UpperCamelCase_ : int=0 , UpperCamelCase_ : Tuple="cpu" , UpperCamelCase_ : Tuple=torch.floataa , UpperCamelCase_ : Optional[Any]=(1, 3, 64, 64)):
"""simple docstring"""
if type(UpperCamelCase_) == str:
__UpperCAmelCase : Union[str, Any] = torch.device(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_)
return latents
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Dict = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Dict = self.get_inputs()
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : List[str] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Any = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : int = self.get_inputs()
__UpperCAmelCase : str = 1
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
@require_torch_a
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : int = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : int = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_ , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_inputs(get_fixed_latents=UpperCamelCase_ , device=UpperCamelCase_)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase_ , enable_math=UpperCamelCase_ , enable_mem_efficient=UpperCamelCase_):
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Optional[int] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_a
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : Tuple = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : List[Any] = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_ , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = self.get_inputs(get_fixed_latents=UpperCamelCase_ , device=UpperCamelCase_)
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : Any = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase_ , enable_math=UpperCamelCase_ , enable_mem_efficient=UpperCamelCase_):
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : int = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[str] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
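

# A minimal usage sketch for the pipeline exercised above, outside the test
# harness. The checkpoint, scheduler parameters, and call arguments are taken
# from the tests; the tests use placeholder identifiers, so this sketch assumes
# the real public API names (UNet2DModel etc.), and the device string is an
# assumption.
import torch
from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")

# Two-step sampling with explicit timesteps, as in the tests above.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0,
             generator=generator, output_type="np").images[0]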
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Optional[int]="</s>" , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : List[str]="<pad>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : List[str] , ) -> None:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
_lowerCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[int] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
_lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
_lowerCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase : str = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[Any] = len(self.sp_model )
_lowerCAmelCase : Dict = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
}
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase : Tuple = src_lang if src_lang is not None else """en_XX"""
_lowerCAmelCase : Optional[Any] = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[int] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.__dict__.copy()
_lowerCAmelCase : List[Any] = None
return state
def __setstate__( self : List[str] , _UpperCAmelCase : Dict ) -> None:
'''simple docstring'''
_lowerCAmelCase : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : str ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase : Optional[int] = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = False
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : str = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase : Any = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def SCREAMING_SNAKE_CASE ( self : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : int = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase : List[Any] = self.convert_tokens_to_ids(_UpperCAmelCase )
_lowerCAmelCase : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : str = "en_XX" , _UpperCAmelCase : Optional[List[str]] = None , _UpperCAmelCase : str = "ro_RO" , **_UpperCAmelCase : str , ) -> BatchEncoding:
'''simple docstring'''
_lowerCAmelCase : int = src_lang
_lowerCAmelCase : int = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.lang_code_to_id[src_lang]
_lowerCAmelCase : Optional[int] = [self.cur_lang_code_id]
_lowerCAmelCase : Tuple = [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.lang_code_to_id[tgt_lang]
_lowerCAmelCase : List[str] = [self.cur_lang_code_id]
_lowerCAmelCase : Tuple = [self.eos_token_id]
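

# A minimal usage sketch for the tokenizer defined above. The checkpoint name
# comes from the vocabulary map at the top of the file; the sample sentences
# are placeholders, and the `text_target` argument assumes a reasonably recent
# transformers version.
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
model_inputs = tokenizer("Hello, world!", return_tensors="pt")
labels = tokenizer(text_target="Salut, lume!", return_tensors="pt").input_ids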
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : int = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[str] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
_lowerCAmelCase : Tuple = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Dict = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_lowerCAmelCase : Tuple = DDPMScheduler()
_lowerCAmelCase : str = AudioDiffusionPipeline(vqvae=_UpperCAmelCase , unet=self.dummy_unet , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase : Any = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase : List[Any] = pipe(generator=_UpperCAmelCase , steps=4 )
_lowerCAmelCase : List[str] = output.audios[0]
_lowerCAmelCase : int = output.images[0]
_lowerCAmelCase : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase : Union[str, Any] = pipe(generator=_UpperCAmelCase , steps=4 , return_dict=_UpperCAmelCase )
_lowerCAmelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_lowerCAmelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : Dict = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_lowerCAmelCase : List[str] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_lowerCAmelCase : Any = DDIMScheduler()
_lowerCAmelCase : List[Any] = self.dummy_vqvae_and_unet
_lowerCAmelCase : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
np.random.seed(0 )
_lowerCAmelCase : Union[str, Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_lowerCAmelCase : Optional[int] = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase : Dict = pipe(raw_audio=_UpperCAmelCase , generator=_UpperCAmelCase , start_step=5 , steps=10 )
_lowerCAmelCase : Tuple = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_lowerCAmelCase : List[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : Optional[int] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_lowerCAmelCase : Union[str, Any] = self.dummy_unet_condition
_lowerCAmelCase : Optional[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_UpperCAmelCase , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
_lowerCAmelCase : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
np.random.seed(0 )
_lowerCAmelCase : Any = torch.rand((1, 1, 10) )
_lowerCAmelCase : List[Any] = pipe(generator=_UpperCAmelCase , encoding=_UpperCAmelCase )
_lowerCAmelCase : Tuple = output.images[0]
_lowerCAmelCase : Any = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : str = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __snake_case (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = torch_device
_lowerCAmelCase : List[str] = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
_lowerCAmelCase : str = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase : Tuple = pipe(generator=_UpperCAmelCase )
_lowerCAmelCase : Any = output.audios[0]
_lowerCAmelCase : List[str] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_lowerCAmelCase : Any = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
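

# A minimal usage sketch for the slow test's pipeline above. The checkpoint
# name and the output attributes are taken from the test; the device string is
# an assumption.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(42)
output = pipe(generator=generator)
audio = output.audios[0]   # raw waveform
image = output.images[0]   # mel spectrogram rendered as a PIL image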
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by uniformly sampling the square enclosing the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the exact area under y=x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('******************')
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)

    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
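

# The Monte Carlo error above shrinks like 1/sqrt(N): a hundredfold increase in
# samples buys roughly a tenfold reduction in error. A quick sketch (not part
# of the original module) that illustrates this, reusing the imports at the top
# of the file:
def pi_error(iterations: int) -> float:
    """Absolute error of the circle-sampling pi estimate for a given N."""
    proportion = mean(
        int(sqrt(uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2) <= 1)
        for _ in range(iterations))
    return abs(pi - proportion * 4)


# On average the second value should be about 10x smaller than the first.
# print(pi_error(1_000), pi_error(100_000))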
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=7, lowerCamelCase__=400, lowerCamelCase__=2000, lowerCamelCase__=24, lowerCamelCase__=24, lowerCamelCase__=0.0, lowerCamelCase__=1_6000, lowerCamelCase__=True, lowerCamelCase__=True, ):
A : Optional[int] = parent
A : List[Any] = batch_size
A : str = min_seq_length
A : str = max_seq_length
A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A : Dict = feature_size
A : Any = num_mel_bins
A : int = padding_value
A : Optional[int] = sampling_rate
A : str = return_attention_mask
A : int = do_normalize
def _lowerCAmelCase ( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
A : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
A : Tuple = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
'''simple docstring'''
__lowerCamelCase : List[str] = SpeechaTextFeatureExtractor if is_speech_available() else None
def _lowerCAmelCase ( self ):
A : Tuple = SpeechaTextFeatureExtractionTester(self )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
self.assertTrue(np.all(np.mean(lowerCamelCase__, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__, axis=0 ) - 1 ) < 1e-3 ) )
def _lowerCAmelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A : List[str] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
A : List[Any] = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
A : Any = feature_extractor(lowerCamelCase__, padding=lowerCamelCase__, return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
A : List[str] = feature_extractor(speech_inputs[0], return_tensors="""np""" ).input_features
A : Tuple = feature_extractor(np_speech_inputs[0], return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
# Test batched
A : List[Any] = feature_extractor(lowerCamelCase__, return_tensors="""np""" ).input_features
A : int = feature_extractor(lowerCamelCase__, return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A : Optional[Any] = np.asarray(lowerCamelCase__ )
A : List[Any] = feature_extractor(lowerCamelCase__, return_tensors="""np""" ).input_features
A : str = feature_extractor(lowerCamelCase__, return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
A : Any = ["""longest""", """max_length""", """do_not_pad"""]
A : int = [None, 16, None]
for max_length, padding in zip(lowerCamelCase__, lowerCamelCase__ ):
A : Tuple = feature_extractor(
lowerCamelCase__, padding=lowerCamelCase__, max_length=lowerCamelCase__, return_attention_mask=lowerCamelCase__ )
A : Tuple = inputs.input_features
A : Union[str, Any] = inputs.attention_mask
A : Optional[Any] = [np.sum(lowerCamelCase__ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
A : List[str] = ["""longest""", """max_length""", """do_not_pad"""]
A : Tuple = [None, 16, None]
for max_length, padding in zip(lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = feature_extractor(
lowerCamelCase__, max_length=lowerCamelCase__, padding=lowerCamelCase__, return_tensors="""np""", return_attention_mask=lowerCamelCase__ )
A : Optional[int] = inputs.input_features
A : List[Any] = inputs.attention_mask
A : str = [np.sum(lowerCamelCase__ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _lowerCAmelCase ( self ):
A : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
A : Optional[Any] = feature_extractor(
lowerCamelCase__, padding="""max_length""", max_length=4, truncation=lowerCamelCase__, return_tensors="""np""", return_attention_mask=lowerCamelCase__, )
A : Union[str, Any] = inputs.input_features
A : Optional[Any] = inputs.attention_mask
A : Dict = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def _lowerCAmelCase ( self ):
A : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
A : List[Any] = feature_extractor(
lowerCamelCase__, padding="""longest""", max_length=4, truncation=lowerCamelCase__, return_tensors="""np""", return_attention_mask=lowerCamelCase__, )
A : List[Any] = inputs.input_features
A : Optional[Any] = inputs.attention_mask
A : List[str] = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
A : int = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
A : List[str] = feature_extractor(
lowerCamelCase__, padding="""longest""", max_length=16, truncation=lowerCamelCase__, return_tensors="""np""", return_attention_mask=lowerCamelCase__, )
A : List[Any] = inputs.input_features
A : List[str] = inputs.attention_mask
A : List[str] = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def _lowerCAmelCase ( self ):
import torch
A : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A : Dict = np.random.rand(100, 32 ).astype(np.floataa )
A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}], return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A : str = feature_extractor.pad([{"""input_features""": inputs}], return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
from datasets import load_dataset
A : List[str] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""" )
# automatic decoding with librispeech
A : int = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def _lowerCAmelCase ( self ):
# fmt: off
A : Optional[Any] = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
A : Optional[Any] = self._load_datasamples(1 )
A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A : Optional[Any] = feature_extractor(lowerCamelCase__, return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], lowerCamelCase__, atol=1e-4 ) )
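

# A minimal usage sketch for the feature extractor exercised above. The class
# and checkpoint names ("Speech2TextFeatureExtractor",
# "facebook/s2t-small-librispeech-asr") are assumptions about the real,
# unobfuscated identifiers; the audio input is a placeholder.
import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
features = extractor(audio, sampling_rate=16000, return_tensors="np")
print(features.input_features.shape)  # (1, frames, num_mel_bins) log-mel features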
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
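

# Migration sketch: the deprecated wrapper and its replacement are drop-in
# compatible, so call sites only need the class swapped (the checkpoint name is
# an illustrative assumption):
#
#   old (emits a FutureWarning):
#       extractor = DeiTFeatureExtractor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   new:
#       processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")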
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'''^(.*)_\d+\.jpg$''', stem).groups()[0]
class PetsDataset(Dataset):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> Optional[Any]:
a_ : List[str] = file_names
a_ : str = image_transform
a_ : Optional[Any] = label_to_id
def __len__( self : List[str] ) -> Dict:
return len(self.file_names )
def __getitem__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
a_ : Union[str, Any] = self.file_names[idx]
a_ : Union[str, Any] = PIL.Image.open(__SCREAMING_SNAKE_CASE )
a_ : int = raw_image.convert('''RGB''' )
if self.image_transform is not None:
a_ : str = self.image_transform(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = extract_label(__SCREAMING_SNAKE_CASE )
if self.label_to_id is not None:
a_ : Any = self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
a_ : Any = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
a_ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a_ : Dict = config['''lr''']
a_ : str = int(config['''num_epochs'''] )
a_ : Any = int(config['''seed'''] )
a_ : Any = int(config['''batch_size'''] )
a_ : Optional[Any] = config['''image_size''']
if not isinstance(__A , (list, tuple) ):
a_ : Union[str, Any] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
a_ : List[str] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a_ : List[str] = int(args.checkpointing_steps )
else:
raise ValueError(
f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
a_ : str = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a_ : Dict = os.path.split(__A )[-1].split('''.''' )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
a_ : str = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
a_ : Dict = [extract_label(__A ) for fname in file_names]
a_ : Optional[Any] = list(set(__A ) )
id_to_label.sort()
a_ : Any = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
a_ : Optional[Any] = np.random.permutation(len(__A ) )
a_ : Dict = int(0.8 * len(__A ) )
a_ : Optional[Any] = random_perm[:cut]
a_ : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a_ : Optional[int] = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
a_ : str = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
a_ : Any = Compose([Resize(__A ), ToTensor()] )
a_ : Tuple = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
a_ : Dict = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
a_ : str = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a_ : Union[str, Any] = create_model('''resnet50d''' , pretrained=__A , num_classes=len(__A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a_ : Tuple = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a_ : int = False
for param in model.get_classifier().parameters():
a_ : Tuple = True
# We normalize the batches of images to be a bit faster.
a_ : Union[str, Any] = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
a_ : Union[str, Any] = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a_ : str = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
a_ : Tuple = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a_ , a_ , a_ , a_ , a_ : Tuple = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
a_ : Dict = 0
# We also need to keep track of the starting epoch so files are named properly
a_ : Dict = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
a_ : Optional[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a_ : List[str] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a_ : Tuple = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a_ : List[str] = os.path.splitext(__A )[0]
if "epoch" in training_difference:
a_ : Tuple = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
a_ : str = None
else:
a_ : List[Any] = int(training_difference.replace('''step_''' , '''''' ) )
a_ : Union[str, Any] = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
a_ : List[str] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a_ : Union[str, Any] = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a_ : List[Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a_ : str = {k: v.to(accelerator.device ) for k, v in batch.items()}
a_ : int = (batch['''image'''] - mean) / std
a_ : Any = model(__A )
a_ : List[Any] = torch.nn.functional.cross_entropy(__A , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__A , __A ):
a_ : int = f'step_{overall_step}'
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a_ : Union[str, Any] = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
a_ : str = 0
a_ : List[Any] = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a_ : Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
a_ : List[Any] = (batch['''image'''] - mean) / std
with torch.no_grad():
a_ : Union[str, Any] = model(__A )
a_ : List[str] = outputs.argmax(dim=-1 )
a_ , a_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
a_ : Union[str, Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a_ : Any = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}: {1_00 * eval_metric:.2f}' )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 1_00 * eval_metric,
'''train_loss''': total_loss.item() / len(__A ),
'''epoch''': epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
a_ : Dict = f'epoch_{epoch}'
if args.output_dir is not None:
a_ : int = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
def main():
a_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=__A , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' , type=__A , default=__A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=__A , default=__A , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=__A , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=__A , default=__A , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=__A , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
a_ : Union[str, Any] = parser.parse_args()
a_ : List[Any] = {'''lr''': 3E-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 2_24}
training_function(__A , __A )
if __name__ == "__main__":
main()
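

# A typical way to launch this script with the Accelerate CLI (the script
# filename and data-directory path are placeholders):
#
#   accelerate config                    # answer the interactive questions once
#   accelerate launch cv_example.py --data_dir /path/to/pets/images --with_tracking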
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
'''simple docstring'''
print('Making key files...' )
make_key_files('rsa' , 1024 )
print('Key files generation successful.' )
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    """Generate an RSA public/private key pair of the given bit size."""
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
'''simple docstring'''
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
    public_key, private_key = generate_key(key_size)
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
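

# Sketch of how the generated key pair is used for textbook RSA (illustration
# only; real systems must add padding such as OAEP before encrypting):
#
#   ciphertext = pow(message, e, n)    # encrypt with the public key (n, e)
#   message   = pow(ciphertext, d, n)  # decrypt with the private key (n, d)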
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
        self.post_init()
    def post_init( self ):
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError('llm_int8_threshold must be a float' )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError('llm_int8_skip_modules must be a list of strings' )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError('bnb_4bit_quant_type must be a string' )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
            '0.39.0' ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
    def is_quantizable( self ):
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method( self ):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
@classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ):
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
if return_unused_kwargs:
return config, kwargs
else:
return config
    def to_json_file( self , json_file_path ):
        with open(json_file_path , 'w' , encoding='utf-8' ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + '\n'
            writer.write(json_string )
    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
        return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string( self , use_diff = True ):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict( self ) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
                serializable_config_dict[key] = value
return serializable_config_dict
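# A short usage sketch (added), assuming the class above corresponds to
# transformers.BitsAndBytesConfig: build a 4-bit NF4 config with double
# quantization and fp16 compute, then hand it to from_pretrained. The
# checkpoint name below is purely illustrative.
# from transformers import AutoModelForCausalLM
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_compute_dtype=torch.float16,
# )
# model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)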
| 645
| 1
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor( ProcessorMixin ):
'''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViltImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
def lowerCAmelCase__ ( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCAmelCase__ ( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase__ ( self ):
warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self ):
warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
return self.image_processor
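# A brief usage sketch (added), assuming this is transformers' ViltProcessor:
# one call tokenizes the question and converts the image into pixel_values
# plus pixel_mask, returned together in a single BatchEncoding. The
# checkpoint name is illustrative, not prescribed by this file.
# from PIL import Image
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# image = Image.open("photo.jpg")
# inputs = processor(image, "How many cats are there?", return_tensors="pt")
# # inputs holds input_ids, attention_mask, pixel_values and pixel_mask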
| 27
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class ( config_class ):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints ( ):
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
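# A tiny self-contained illustration (added, not part of the original script)
# of what the checkpoint regex extracts from a config docstring fragment:
_demo_matches = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)').findall(
    '[bert-base-uncased](https://huggingface.co/bert-base-uncased)'
)
assert _demo_matches == [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]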
| 485
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
snake_case_ = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc( model_doc ):
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
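# A toy sketch (added; hypothetical data, not the real toctree) of what
# clean_model_doc_toc does to a model-doc section:
#   [{"local": "model_doc/bert",   "title": "BERT"},
#    {"local": "model_doc/albert", "title": "ALBERT"},
#    {"local": "model_doc/bert",   "title": "BERT"}]
# becomes, after collapsing the duplicated "local" entry and sorting by title:
#   [{"local": "model_doc/albert", "title": "ALBERT"},
#    {"local": "model_doc/bert",   "title": "BERT"}]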
| 537
|
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
class ByT5Tokenizer ( PreTrainedTokenizer ):
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase=125 , _UpperCAmelCase=None , **_UpperCAmelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
A : str = [f'''<extra_id_{i}>''' for i in range(_UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
A : List[str] = len(set(filter(lambda _UpperCAmelCase : bool('''extra_id''' in str(_UpperCAmelCase ) ) , _UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'''
''' extra_ids tokens''' )
A : str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
A : Any = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
A : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , extra_ids=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
A : int = extra_ids
A : Any = 2**8 # utf is 8 bits
# define special tokens dict
A : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
A : Tuple = len(self.special_tokens_encoder )
A : Union[str, Any] = len(_UpperCAmelCase )
for i, token in enumerate(_UpperCAmelCase ):
A : Optional[Any] = self.vocab_size + i - n
A : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
    def vocab_size( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCAmelCase )) + [1]
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
    def _add_eos_if_not_present( self , token_ids ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
A : Optional[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
A : Union[str, Any] = self._add_eos_if_not_present(_UpperCAmelCase )
if token_ids_a is None:
return token_ids_a
else:
A : Tuple = self._add_eos_if_not_present(_UpperCAmelCase )
return token_ids_a + token_ids_a
    def _tokenize( self , text ):
        tokens = [chr(i ) for i in text.encode('''utf-8''' )]
return tokens
def snake_case ( self , _UpperCAmelCase ):
if token in self.special_tokens_encoder:
A : Union[str, Any] = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
A : int = self.added_tokens_encoder[token]
elif len(_UpperCAmelCase ) != 1:
A : Union[str, Any] = self.unk_token_id
else:
A : Union[str, Any] = ord(_UpperCAmelCase ) + self._num_special_tokens
return token_id
def snake_case ( self , _UpperCAmelCase ):
if index in self.special_tokens_decoder:
A : Optional[Any] = self.special_tokens_decoder[index]
else:
A : Dict = chr(index - self._num_special_tokens )
return token
def snake_case ( self , _UpperCAmelCase ):
A : List[str] = B''''''
for token in tokens:
if token in self.special_tokens_decoder:
A : List[str] = self.special_tokens_decoder[token].encode('''utf-8''' )
elif token in self.added_tokens_decoder:
A : Optional[int] = self.special_tokens_decoder[token].encode('''utf-8''' )
elif token in self.special_tokens_encoder:
A : Union[str, Any] = token.encode('''utf-8''' )
elif token in self.added_tokens_encoder:
A : str = token.encode('''utf-8''' )
else:
A : Tuple = bytes([ord(_UpperCAmelCase )] )
bstring += tok_string
A : List[Any] = bstring.decode('''utf-8''' , errors='''ignore''' )
return string
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
return ()
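# A self-contained round-trip sketch (added for illustration) of the
# byte-level scheme above: each UTF-8 byte maps to one token id, offset by
# the three special tokens (pad=0, eos=1, unk=2), mirroring the
# ord()-plus-offset conversion used by the tokenizer.
_demo_offset = 3
_demo_ids = [b + _demo_offset for b in 'hi'.encode('utf-8' )]   # [107, 108]
assert bytes(i - _demo_offset for i in _demo_ids ).decode('utf-8' ) == 'hi'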
| 537
| 1
|
def heaps ( arr : list ):
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate ( n : int, arr : list ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr ), arr )
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
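# For reference (added; hedged expectation): on [1, 2, 3] the generator above
# emits all 3! = 6 permutations in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]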
| 53
|
from ....utils import logging
a_ :Optional[int] = logging.get_logger(__name__)
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
    def __init__( self , config , num_labels=None , modal_hidden_size=2_0_4_8 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 478
| 0
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowercase__ :
'''simple docstring'''
@staticmethod
def UpperCamelCase__ ( *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
pass
def lowerCAmelCase_ ( __UpperCAmelCase: List[str] ) -> Any:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
a : List[str] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = pipeline(
'''document-question-answering''', model=__magic_name__, tokenizer=__magic_name__, image_processor=__magic_name__ )
UpperCamelCase__ : Tuple = INVOICE_URL
UpperCamelCase__ : Union[str, Any] = list(zip(*apply_tesseract(load_image(__magic_name__ ), __magic_name__, '''''' ) ) )
UpperCamelCase__ : int = '''What is the placebo?'''
UpperCamelCase__ : Optional[int] = [
{
'''image''': load_image(__magic_name__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = dqa_pipeline(__magic_name__, top_k=2 )
self.assertEqual(
__magic_name__, [
[
{'''score''': ANY(__magic_name__ ), '''answer''': ANY(__magic_name__ ), '''start''': ANY(__magic_name__ ), '''end''': ANY(__magic_name__ )},
{'''score''': ANY(__magic_name__ ), '''answer''': ANY(__magic_name__ ), '''start''': ANY(__magic_name__ ), '''end''': ANY(__magic_name__ )},
]
]
* 3, )
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : str = pipeline('''document-question-answering''', model='''hf-internal-testing/tiny-random-layoutlmv2''' )
UpperCamelCase__ : Tuple = INVOICE_URL
UpperCamelCase__ : Dict = '''How many cats are there?'''
UpperCamelCase__ : int = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
UpperCamelCase__ : List[Any] = dqa_pipeline(image=__magic_name__, question=__magic_name__, top_k=2 )
self.assertEqual(nested_simplify(__magic_name__, decimals=4 ), __magic_name__ )
UpperCamelCase__ : Any = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2 )
self.assertEqual(nested_simplify(__magic_name__, decimals=4 ), __magic_name__ )
        # No text is detected in this image, so layoutlmv2 should fail,
        # most likely returning an empty answer.
UpperCamelCase__ : Optional[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCamelCase__ : Any = dqa_pipeline(image=__magic_name__, question=__magic_name__, top_k=2 )
self.assertEqual(__magic_name__, [] )
        # We can optionally pass the words and bounding boxes directly
UpperCamelCase__ : List[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : Optional[int] = dqa_pipeline(image=__magic_name__, question=__magic_name__, words=__magic_name__, boxes=__magic_name__, top_k=2 )
self.assertEqual(__magic_name__, [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = pipeline(
'''document-question-answering''', model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''', revision='''9977165''', )
UpperCamelCase__ : Dict = INVOICE_URL
UpperCamelCase__ : str = '''What is the invoice number?'''
UpperCamelCase__ : Any = dqa_pipeline(image=__magic_name__, question=__magic_name__, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
], )
UpperCamelCase__ : int = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
], )
UpperCamelCase__ : Tuple = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2, )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = pipeline(
'''document-question-answering''', model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''', revision='''9977165''', max_seq_len=50, )
UpperCamelCase__ : Optional[Any] = INVOICE_URL
UpperCamelCase__ : List[Any] = '''What is the invoice number?'''
UpperCamelCase__ : List[Any] = dqa_pipeline(image=__magic_name__, question=__magic_name__, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
], )
UpperCamelCase__ : Tuple = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
], )
UpperCamelCase__ : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : str = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''', revision='''3dc6de3''', add_prefix_space=__magic_name__ )
UpperCamelCase__ : List[str] = pipeline(
'''document-question-answering''', model='''impira/layoutlm-document-qa''', tokenizer=__magic_name__, revision='''3dc6de3''', )
UpperCamelCase__ : Union[str, Any] = INVOICE_URL
UpperCamelCase__ : str = '''What is the invoice number?'''
UpperCamelCase__ : Tuple = dqa_pipeline(image=__magic_name__, question=__magic_name__, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
], )
UpperCamelCase__ : List[Any] = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
], )
UpperCamelCase__ : Tuple = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2, )
UpperCamelCase__ : str = list(zip(*apply_tesseract(load_image(__magic_name__ ), __magic_name__, '''''' ) ) )
# This model should also work if `image` is set to None
UpperCamelCase__ : Any = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question}, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''', revision='''3dc6de3''', add_prefix_space=__magic_name__ )
UpperCamelCase__ : int = pipeline(
'''document-question-answering''', model='''impira/layoutlm-document-qa''', tokenizer=__magic_name__, revision='''3dc6de3''', max_seq_len=50, )
UpperCamelCase__ : List[str] = INVOICE_URL
UpperCamelCase__ : Union[str, Any] = '''What is the invoice number?'''
UpperCamelCase__ : Dict = dqa_pipeline(image=__magic_name__, question=__magic_name__, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
], )
UpperCamelCase__ : Tuple = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2, )
UpperCamelCase__ : Tuple = list(zip(*apply_tesseract(load_image(__magic_name__ ), __magic_name__, '''''' ) ) )
# This model should also work if `image` is set to None
UpperCamelCase__ : Dict = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question}, top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__, decimals=4 ), [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
], )
@slow
@require_torch
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : str = pipeline(
'''document-question-answering''', model='''naver-clova-ix/donut-base-finetuned-docvqa''', tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ), feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''', )
UpperCamelCase__ : List[str] = INVOICE_URL
UpperCamelCase__ : int = '''What is the invoice number?'''
UpperCamelCase__ : Tuple = dqa_pipeline(image=__magic_name__, question=__magic_name__, top_k=2 )
self.assertEqual(nested_simplify(__magic_name__, decimals=4 ), [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
pass
| 369
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCAmelCase_ ( __UpperCAmelCase: Union[str, Any] ) -> Union[str, Any]:
UpperCamelCase__ : List[str] = model.config
UpperCamelCase__ : Optional[Any] = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
UpperCamelCase__ : Union[str, Any] = MBartConfig(
is_decoder=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , add_cross_attention=__UpperCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__UpperCAmelCase , add_final_layer_norm=__UpperCAmelCase , )
return encoder_config, decoder_config
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> Tuple:
if "encoder.model" in name:
UpperCamelCase__ : str = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
UpperCamelCase__ : Optional[int] = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
UpperCamelCase__ : Dict = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase__ : Optional[int] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
UpperCamelCase__ : Optional[Any] = '''encoder.''' + name
if "attn.proj" in name:
UpperCamelCase__ : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
UpperCamelCase__ : str = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCamelCase__ : List[str] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase__ : Any = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase__ : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
UpperCamelCase__ : Optional[Any] = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
UpperCamelCase__ : Any = '''encoder.layernorm.bias'''
return name
def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: str ) -> Dict:
for key in orig_state_dict.copy().keys():
UpperCamelCase__ : Dict = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
UpperCamelCase__ : Tuple = key.split('''.''' )
UpperCamelCase__ : str = int(key_split[3] )
UpperCamelCase__ : List[Any] = int(key_split[5] )
UpperCamelCase__ : Optional[Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ : Tuple = val[:dim, :]
UpperCamelCase__ : Dict = val[dim : dim * 2, :]
UpperCamelCase__ : Dict = val[-dim:, :]
else:
UpperCamelCase__ : Optional[int] = val[:dim]
UpperCamelCase__ : str = val[dim : dim * 2]
UpperCamelCase__ : int = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
UpperCamelCase__ : Any = val
return orig_state_dict
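# A toy illustration (added; shapes are made up) of the qkv split above: the
# original checkpoint fuses query/key/value into a single (3 * dim, dim)
# matrix, which is sliced into three (dim, dim) blocks:
# fused = torch.randn(3 * 4, 4)
# q, k, v = fused[:4, :], fused[4:8, :], fused[-4:, :]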
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: List[Any]=None , __UpperCAmelCase: Tuple=False ) -> Optional[int]:
# load original model
UpperCamelCase__ : List[Any] = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
UpperCamelCase__ ,UpperCamelCase__ : List[str] = get_configs(__UpperCAmelCase )
UpperCamelCase__ : int = DonutSwinModel(__UpperCAmelCase )
UpperCamelCase__ : Optional[Any] = MBartForCausalLM(__UpperCAmelCase )
UpperCamelCase__ : Optional[int] = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
UpperCamelCase__ : List[Any] = original_model.state_dict()
UpperCamelCase__ : List[Any] = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
UpperCamelCase__ : Optional[int] = load_dataset('''hf-internal-testing/example-documents''' )
UpperCamelCase__ : Any = dataset['''test'''][0]['''image'''].convert('''RGB''' )
UpperCamelCase__ : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
UpperCamelCase__ : Dict = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
UpperCamelCase__ : Any = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase__ : str = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
UpperCamelCase__ : Optional[int] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
UpperCamelCase__ : int = '''When is the coffee break?'''
UpperCamelCase__ : List[str] = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
UpperCamelCase__ : int = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
UpperCamelCase__ : Optional[int] = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        UpperCamelCase__ : Any = '''<s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
UpperCamelCase__ : List[Any] = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
UpperCamelCase__ : Dict = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
UpperCamelCase__ : Tuple = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
UpperCamelCase__ : List[str] = original_model.encoder.model.patch_embed(__UpperCAmelCase )
UpperCamelCase__ ,UpperCamelCase__ : List[str] = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
# verify encoder hidden states
UpperCamelCase__ : Dict = original_model.encoder(__UpperCAmelCase )
UpperCamelCase__ : Optional[Any] = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-2 )
# verify decoder hidden states
UpperCamelCase__ : Optional[Any] = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
UpperCamelCase__ : str = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 369
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : Optional[Any] = logging.get_logger(__name__)
__magic_name__ : Tuple = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''fnet'''
    def __init__( self , vocab_size=32_000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
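# A brief usage sketch (added), assuming this mirrors transformers' FNetConfig:
# config = FNetConfig(hidden_size=768, num_hidden_layers=12)  # defaults, shown explicitly
# config.use_tpu_fourier_optimizations = True                 # opt into the TPU Fourier path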
| 672
|
'''simple docstring'''
import math
def fx ( x , a ):
    '''simple docstring'''
    return math.pow(x , 2 ) - a
def fx_derivative ( x ):
    '''simple docstring'''
    return 2 * x
def get_initial_point ( a ):
    '''simple docstring'''
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative ( a , max_iter = 99_99 , tolerance = 0.00000000000001 ):
    '''simple docstring'''
    if a < 0:
        raise ValueError("math domain error" )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
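# A worked check (added for illustration): Newton's iteration
# x_{k+1} = x_k - (x_k**2 - a) / (2 * x_k) converges quadratically, so the
# default tolerance is reached within a handful of steps.
assert abs(square_root_iterative(16.0 ) - 4.0 ) < 1e-9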
| 672
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BridgeTowerImageProcessor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ) -> BatchEncoding:
lowerCAmelCase_ = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
lowerCAmelCase_ = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def __a ( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def __a ( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def __a ( self ) -> Any:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 719
|
import functools
def min_distance_up_bottom ( worda : str , wordb : str ):
    """simple docstring"""
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance ( indexa : int , indexb : int ) -> int:
        # if the first word is exhausted - delete all remaining characters of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word is exhausted - delete all remaining characters of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] ) # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
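# A worked example (added for illustration): the memoized recurrence above is
# the classic Levenshtein distance.
assert min_distance_up_bottom('kitten' , 'sitting' ) == 3   # k->s, e->i, insert g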
| 279
| 0
|
'''simple docstring'''
import string
import numpy
def greatest_common_divisor ( a , b ):
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher :
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 3_6 )
    to_int = numpy.vectorize(round )
    def __init__( self ,encrypt_key : numpy.ndarray ):
        self.encrypt_key = self.modulus(encrypt_key ) # mod36 calc's on the encrypt key
        self.check_determinant() # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self ,letter : str ):
        return self.key_string.index(letter )
    def replace_digits( self ,num : int ):
        return self.key_string[round(num )]
    def check_determinant( self ):
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det ,len(self.key_string ) ) != 1:
            message = (
                F'''determinant modular {req_l} of encryption key({det}) '''
                F'''is not co prime w.r.t {req_l}.\nTry another key.'''
            )
            raise ValueError(message )
    def process_text( self ,text : str ):
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )
    def encrypt( self ,text : str ):
        text = self.process_text(text.upper() )
        encrypted = ""
        for i in range(0 ,len(text ) - self.break_key + 1 ,self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key( self ):
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        det_inv = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(inv_key ) )
    def decrypt( self ,text : str ):
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = ""
        for i in range(0 ,len(text ) - self.break_key + 1 ,self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main ( ):
    n = int(input("Enter the order of the encryption key: " ) )
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers" )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print("Would you like to encrypt or decrypt some text? (1 or 2)" )
    option = input("\n1. Encrypt\n2. Decrypt\n" )
    if option == "1":
        text_e = input("What text would you like to encrypt?: " )
        print("Your encrypted text is:" )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input("What text would you like to decrypt?: " )
        print("Your decrypted text is:" )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
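# A worked toy example (added for illustration) of the arithmetic above,
# using the same 36-character alphabet (A-Z then 0-9) and the 2x2 key
# [[2, 5], [1, 6]] (det = 7, which is coprime with 36, so the key is valid):
#   "HE" -> [7, 4]   -> K.v mod 36 = [34, 31] -> "85"
#   "LP" -> [11, 15] -> K.v mod 36 = [25, 29] -> "Z3"
# so encrypt("HELP") == "85Z3"; decrypt applies the modular inverse of K.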
| 143
|
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree :
    def __init__( self , size : int ):
        self.size = size
        # approximate the overall size of the segment tree for the given input size
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )] # flag for lazy update
    def left( self , idx : int ):
        return idx * 2
    def right( self , idx : int ):
        return idx * 2 + 1
    def build( self , idx : int , left_element : int , right_element : int , a : list[int] ):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx : int , left_element : int , right_element : int , a : int , b : int , val : int ):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query( self , idx : int , left_element : int , right_element : int , a : int , b : int ):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
def __str__( self : List[Any] ):
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
UpperCAmelCase__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
UpperCAmelCase__ = 15
UpperCAmelCase__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
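# A behavioral sketch (added for illustration) of the lazy updates above on a
# tiny array [1, 2, 3] with size 3:
#   query(1, 1, 3, 1, 3) -> 3           (max over positions 1..3)
#   update(1, 1, 3, 1, 2, 10)           (assign 10 to positions 1..2)
#   query(1, 1, 3, 1, 3) -> 10
# The assignment is parked in self.lazy/self.flag and only pushed down to the
# children when a later query or update descends through the node.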
| 224
| 0
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo ( ctypes.Structure ):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def hide_cursor ( ):
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def show_cursor ( ):
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def hide ( ):  # name assumed from the upstream helper; the renaming had collapsed it
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
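# A usage sketch (added; `hide` is the @contextmanager restored above, whose
# name is assumed from the upstream helper). The cursor is re-shown even if
# the body raises, because show_cursor() runs in the finally block:
# with hide():
#     run_long_task()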
| 636
|
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class FixedPriorityQueue :
def __init__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
        self.queues = [
[],
[],
[],
]
    def enqueue( self , priority : int , data : int )-> None:
        """simple docstring"""
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self )-> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : Any )-> str:
"""simple docstring"""
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue :
def __init__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
        self.queue = []
    def enqueue( self , data : int )-> None:
        """simple docstring"""
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self )-> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
return data
def __str__( self : List[str] )-> str:
"""simple docstring"""
return str(self.queue )
def fixed_priority_queue ( ):
    '''simple docstring'''
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue ( ):
    '''simple docstring'''
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 636
| 1
|
_SCREAMING_SNAKE_CASE = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
_SCREAMING_SNAKE_CASE = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
_SCREAMING_SNAKE_CASE = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
_SCREAMING_SNAKE_CASE = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
_SCREAMING_SNAKE_CASE = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
_SCREAMING_SNAKE_CASE = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
_SCREAMING_SNAKE_CASE = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
_SCREAMING_SNAKE_CASE = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 537
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 537
| 1
|
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__UpperCamelCase : int = get_logger(__name__)
def __UpperCAmelCase ( _snake_case : Union[str, Any], _snake_case : Optional[Any], _snake_case : int, _snake_case : int, _snake_case : int=0 ):
os.makedirs(_snake_case, exist_ok=_snake_case )
with FSDP.state_dict_type(
_snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
_lowercase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowercase = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
_lowercase = os.path.join(_snake_case, _snake_case )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(_snake_case, _snake_case )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowercase = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
_lowercase = os.path.join(_snake_case, _snake_case )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(_snake_case, _snake_case )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowercase = os.path.join(_snake_case, f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(_snake_case, exist_ok=_snake_case )
logger.info(f"""Saving model to {ckpt_dir}""" )
_lowercase = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_snake_case, storage_writer=dist_cp.FileSystemWriter(_snake_case ), planner=DefaultSavePlanner(), )
logger.info(f"""Model saved to {ckpt_dir}""" )
def __UpperCAmelCase ( _snake_case : Tuple, _snake_case : int, _snake_case : List[str], _snake_case : str, _snake_case : Any=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_snake_case ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
_lowercase = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
_lowercase = os.path.join(_snake_case, _snake_case )
logger.info(f"""Loading model from {input_model_file}""" )
_lowercase = torch.load(_snake_case )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowercase = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
_lowercase = os.path.join(_snake_case, _snake_case )
logger.info(f"""Loading model from {input_model_file}""" )
_lowercase = torch.load(_snake_case )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowercase = (
os.path.join(_snake_case, f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
_lowercase = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_snake_case, storage_reader=dist_cp.FileSystemReader(_snake_case ), planner=DefaultLoadPlanner(), )
_lowercase = state_dict["model"]
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(_snake_case )
def __UpperCAmelCase ( _snake_case : Optional[int], _snake_case : Optional[int], _snake_case : str, _snake_case : Any, _snake_case : List[Any], _snake_case : List[Any]=0 ):
os.makedirs(_snake_case, exist_ok=_snake_case )
with FSDP.state_dict_type(
_snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
_lowercase = FSDP.optim_state_dict(_snake_case, _snake_case )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_lowercase = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
_lowercase = os.path.join(_snake_case, _snake_case )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(_snake_case, _snake_case )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
_lowercase = os.path.join(_snake_case, f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(_snake_case, exist_ok=_snake_case )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(_snake_case ), planner=DefaultSavePlanner(), )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def __UpperCAmelCase ( _snake_case : List[str], _snake_case : int, _snake_case : Optional[Any], _snake_case : Any, _snake_case : Optional[int], _snake_case : Tuple=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowercase = None
# The check below should work, but currently it does not (mostly a PyTorch issue);
# in the meantime it is disabled at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_lowercase = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
_lowercase = os.path.join(_snake_case, _snake_case )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
_lowercase = torch.load(_snake_case )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
_lowercase = (
os.path.join(_snake_case, f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
_lowercase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(_snake_case ), )
_lowercase = optim_state["optimizer"]
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
_lowercase = FSDP.optim_state_dict_to_load(_snake_case, _snake_case, _snake_case )
optimizer.load_state_dict(_snake_case )
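# A hedged usage sketch of the four helpers above. In Accelerate they appear to be
# exposed as `save_fsdp_model` / `load_fsdp_model` / `save_fsdp_optimizer` /
# `load_fsdp_optimizer`; the readable argument names and the `ckpt_dir` path below
# are assumptions for illustration, not a definitive API reference.
def _fsdp_checkpoint_sketch(fsdp_plugin, accelerator, model, optimizer, ckpt_dir):
    from accelerate.utils import (
        load_fsdp_model,
        load_fsdp_optimizer,
        save_fsdp_model,
        save_fsdp_optimizer,
    )

    # Save model and optimizer state according to fsdp_plugin.state_dict_type ...
    save_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
    save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)
    # ... and restore them the same way when resuming training.
    load_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
    load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)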
| 227
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__UpperCamelCase : Optional[Any] = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 227
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental prime sieve: each known prime marks its next composite."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite; slide this factor forward to its next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime; its square is the first composite it must mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Walk the primes until 2 * prime * n exceeds the limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
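# Quick sanity sketch (a hypothetical check, added for illustration): the first
# few values produced by the incremental sieve should match the prime sequence.
_primes_sketch = sieve()
assert [next(_primes_sketch) for _ in range(5)] == [2, 3, 5, 7, 11]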
| 284
|
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Dict = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE : int = GPTSwaTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = GPTSwaTokenizer(_SCREAMING_SNAKE_CASE , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = 'This is a test'
lowerCAmelCase = 'This is a test'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = '<s>'
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 20_00 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = GPTSwaTokenizer(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCAmelCase = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
# fmt: off
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = GPTSwaTokenizer(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCAmelCase = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertListEqual(tokenizer.encode_fast(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Test that decode_fast returns the input text
for text, token_ids in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.decode_fast(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase = {'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='AI-Sweden/gpt-sw3-126m' , sequences=_SCREAMING_SNAKE_CASE , )
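# Byte-fallback illustration (a standalone check, added for clarity): characters
# absent from the SentencePiece vocabulary, such as "é", fall back to their UTF-8
# bytes, which is why <0xC3> and <0xA9> appear among the expected tokens above.
assert "é".encode("utf-8") == b"\xc3\xa9"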
| 284
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _lowerCAmelCase ( lowerCamelCase__ : List[Any] ) -> Any:
_SCREAMING_SNAKE_CASE : Tuple = botoa.client("iam" )
_SCREAMING_SNAKE_CASE : str = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowerCamelCase__, AssumeRolePolicyDocument=json.dumps(lowerCamelCase__, indent=2 ) )
_SCREAMING_SNAKE_CASE : Optional[int] = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowerCamelCase__, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(lowerCamelCase__, indent=2 ), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = botoa.client("iam" )
return iam_client.get_role(RoleName=lowerCamelCase__ )["Role"]["Arn"]
def _lowerCAmelCase ( ) -> Any:
_SCREAMING_SNAKE_CASE : List[Any] = _ask_options(
"How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], lowerCamelCase__, )
_SCREAMING_SNAKE_CASE : int = None
if credentials_configuration == 0:
_SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field("Enter your AWS Profile name: [default] ", default="default" )
_SCREAMING_SNAKE_CASE : int = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
_SCREAMING_SNAKE_CASE : str = _ask_field("AWS Access Key ID: " )
_SCREAMING_SNAKE_CASE : int = aws_access_key_id
_SCREAMING_SNAKE_CASE : int = _ask_field("AWS Secret Access Key: " )
_SCREAMING_SNAKE_CASE : Any = aws_secret_access_key
_SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1" )
_SCREAMING_SNAKE_CASE : Any = aws_region
_SCREAMING_SNAKE_CASE : Dict = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], lowerCamelCase__, )
if role_management == 0:
_SCREAMING_SNAKE_CASE : Dict = _ask_field("Enter your IAM role name: " )
else:
_SCREAMING_SNAKE_CASE : str = "accelerate_sagemaker_execution_role"
print(f'''Accelerate will create an IAM role "{iam_role_name}" using the provided credentials''')
_create_iam_role_for_sagemaker(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Dict = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=lowerCamelCase__, error_message="Please enter yes or no.", )
_SCREAMING_SNAKE_CASE : int = None
if is_custom_docker_image:
_SCREAMING_SNAKE_CASE : List[Any] = _ask_field("Enter your Docker image: ", lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() )
_SCREAMING_SNAKE_CASE : List[Any] = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=lowerCamelCase__, error_message="Please enter yes or no.", )
_SCREAMING_SNAKE_CASE : List[str] = None
if is_sagemaker_inputs_enabled:
_SCREAMING_SNAKE_CASE : Tuple = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower(), )
_SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=lowerCamelCase__, error_message="Please enter yes or no.", )
_SCREAMING_SNAKE_CASE : int = None
if is_sagemaker_metrics_enabled:
_SCREAMING_SNAKE_CASE : Tuple = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower(), )
_SCREAMING_SNAKE_CASE : Any = _ask_options(
"What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode, )
_SCREAMING_SNAKE_CASE : Optional[Any] = {}
_SCREAMING_SNAKE_CASE : int = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=lowerCamelCase__, error_message="Please enter yes or no.", )
if use_dynamo:
_SCREAMING_SNAKE_CASE : List[Any] = "dynamo_"
_SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_options(
"Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
_SCREAMING_SNAKE_CASE : List[str] = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=lowerCamelCase__, error_message="Please enter yes or no.", )
if use_custom_options:
_SCREAMING_SNAKE_CASE : List[str] = _ask_options(
"Which mode do you want to use?", lowerCamelCase__, lambda lowerCamelCase__ : TORCH_DYNAMO_MODES[int(lowerCamelCase__ )], default="default", )
_SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=lowerCamelCase__, error_message="Please enter yes or no.", )
_SCREAMING_SNAKE_CASE : Any = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=lowerCamelCase__, error_message="Please enter yes or no.", )
_SCREAMING_SNAKE_CASE : str = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
_SCREAMING_SNAKE_CASE : Optional[Any] = _ask_options(
lowerCamelCase__, lowerCamelCase__, lambda lowerCamelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowerCamelCase__ )] )
else:
eca_instance_query += " [ml.p3.2xlarge]:"
_SCREAMING_SNAKE_CASE : int = _ask_field(lowerCamelCase__, lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower(), default="ml.p3.2xlarge" )
_SCREAMING_SNAKE_CASE : str = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_SCREAMING_SNAKE_CASE : Dict = _ask_field(
"How many machines do you want use? [1]: ", lowerCamelCase__, default=1, )
_SCREAMING_SNAKE_CASE : int = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=lowerCamelCase__, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=lowerCamelCase__, use_cpu=lowerCamelCase__, dynamo_config=lowerCamelCase__, eca_instance_type=lowerCamelCase__, profile=lowerCamelCase__, region=lowerCamelCase__, iam_role_name=lowerCamelCase__, mixed_precision=lowerCamelCase__, num_machines=lowerCamelCase__, sagemaker_inputs_file=lowerCamelCase__, sagemaker_metrics_file=lowerCamelCase__, )
| 295
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class UpperCamelCase :
def __init__( self , snake_case__ = None ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = value
_SCREAMING_SNAKE_CASE : Node | None = None # Added in order to delete a node easier
_SCREAMING_SNAKE_CASE : Node | None = None
_SCREAMING_SNAKE_CASE : Node | None = None
def __repr__( self ):
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCamelCase :
def __init__( self , snake_case__ = None ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = root
def __str__( self ):
"""simple docstring"""
return str(self.root )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if new_children is not None: # reset its kids
_SCREAMING_SNAKE_CASE : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(snake_case__ ): # if it is the right child
_SCREAMING_SNAKE_CASE : Any = new_children
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = new_children
else:
_SCREAMING_SNAKE_CASE : Any = new_children
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return self.root is None
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = Node(snake_case__ ) # create a new Node
if self.empty(): # if Tree is empty
_SCREAMING_SNAKE_CASE : str = new_node # set its root
else: # Tree is not empty
_SCREAMING_SNAKE_CASE : Dict = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
_SCREAMING_SNAKE_CASE : int = parent_node.left
else:
if parent_node.right is None:
_SCREAMING_SNAKE_CASE : str = new_node
break
else:
_SCREAMING_SNAKE_CASE : Optional[int] = parent_node.right
_SCREAMING_SNAKE_CASE : Any = parent_node
def __SCREAMING_SNAKE_CASE ( self , *snake_case__ ):
"""simple docstring"""
for value in values:
self.__insert(snake_case__ )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_SCREAMING_SNAKE_CASE : List[Any] = node.left if value < node.value else node.right
return node
def __SCREAMING_SNAKE_CASE ( self , snake_case__ = None ):
"""simple docstring"""
if node is None:
if self.root is None:
return None
_SCREAMING_SNAKE_CASE : Optional[Any] = self.root
if not self.empty():
while node.right is not None:
_SCREAMING_SNAKE_CASE : Dict = node.right
return node
def __SCREAMING_SNAKE_CASE ( self , snake_case__ = None ):
"""simple docstring"""
if node is None:
_SCREAMING_SNAKE_CASE : List[Any] = self.root
if self.root is None:
return None
if not self.empty():
_SCREAMING_SNAKE_CASE : Any = self.root
while node.left is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = node.left
return node
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.search(snake_case__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(snake_case__ , snake_case__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(snake_case__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(snake_case__ , node.left )
else:
_SCREAMING_SNAKE_CASE : Dict = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_SCREAMING_SNAKE_CASE : List[Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __SCREAMING_SNAKE_CASE ( self , snake_case__=None ):
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if node:
self.inorder(snake_case__ , node.left )
arr.append(node.value )
self.inorder(snake_case__ , node.right )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : list[int] = []
self.inorder(snake_case__ , snake_case__ ) # append all values to list using inorder traversal
return arr[k - 1]
def _lowerCAmelCase ( lowerCamelCase__ : Node | None ) -> list[Node]:
_SCREAMING_SNAKE_CASE : Optional[int] = []
if curr_node is not None:
_SCREAMING_SNAKE_CASE : int = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _lowerCAmelCase ( ) -> None:
_SCREAMING_SNAKE_CASE : Any = (8, 3, 6, 1, 1_0, 1_4, 1_3, 4, 7)
_SCREAMING_SNAKE_CASE : List[Any] = BinarySearchTree()
for i in testlist:
t.insert(lowerCamelCase__ )
# Prints all the elements of the list in order traversal
print(lowerCamelCase__ )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn't exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: ", t.get_max().value ) # type: ignore
print("Min Value: ", t.get_min().value ) # type: ignore
for i in testlist:
t.remove(lowerCamelCase__ )
print(lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
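# A minimal illustration of the k-th smallest lookup above (the method names in
# the class are obfuscated, so this hypothetical helper re-states the invariant
# directly): an inorder traversal of a BST yields sorted order, so the k-th
# smallest element equals index k - 1 of the sorted input.
def _kth_smallest_sketch(values: tuple[int, ...], k: int) -> int:
    return sorted(values)[k - 1]


assert _kth_smallest_sketch((8, 3, 6, 1, 10, 14, 13, 4, 7), 3) == 4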
| 295
| 1
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a__ : Dict = HfApi()
a__ : List[str] = {}
# fmt: off
a__ : Dict = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
a__ : Dict = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
a__ : str = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
a__ : Dict = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
a__ : Any = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
a__ : Optional[int] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
a__ : Optional[int] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
a__ : str = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
a__ : Union[str, Any] = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
a__ : int = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
a__ : List[str] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
a__ : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
a__ : Any = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
a__ : List[Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
a__ : List[Any] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
a__ : Optional[Any] = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a__ : Tuple = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
a__ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
a__ : List[Any] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a__ : Tuple = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a__ : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a__ : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 51
|
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
print('''Loading config file...''' )
def flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]="" , SCREAMING_SNAKE_CASE_ : Dict="." ):
UpperCAmelCase = []
for k, v in d.items():
UpperCAmelCase = parent_key + sep + k if parent_key else k
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sep=SCREAMING_SNAKE_CASE_ ).items() )
else:
items.append((new_key, v) )
return dict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = argparse.Namespace()
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as yaml_file:
try:
UpperCAmelCase = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=yaml.FullLoader )
UpperCAmelCase = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ )
for k, v in flat_cfg.items():
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) ) )
return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase = MobileViTVaConfig()
UpperCAmelCase = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase = 151
UpperCAmelCase = 512
UpperCAmelCase = '''ade20k-id2label.json'''
UpperCAmelCase = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase = 21
UpperCAmelCase = 512
UpperCAmelCase = '''pascal-voc-id2label.json'''
UpperCAmelCase = True
# orig_config
UpperCAmelCase = load_orig_config_file(SCREAMING_SNAKE_CASE_ )
assert getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = val
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
"""simple docstring"""
if base_model:
UpperCAmelCase = ''''''
else:
UpperCAmelCase = '''mobilevitv2.'''
UpperCAmelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase = k[8:]
else:
UpperCAmelCase = k
if ".block." in k:
UpperCAmelCase = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." )
for i in [1, 2]:
if f"layer_{i}." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
UpperCAmelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f"layer_{i}.0." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if f"layer_{i}.1.local_rep.0." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if f"layer_{i}.1.local_rep.1." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase = [0, 1]
elif i == 4:
UpperCAmelCase = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase = [0, 1, 2]
for j in j_in:
if f"layer_{i}.1.global_rep.{j}." in k:
UpperCAmelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if f"layer_{i}.1.global_rep.{j+1}." in k:
UpperCAmelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
if f"layer_{i}.1.conv_proj." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
UpperCAmelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(SCREAMING_SNAKE_CASE_ )
for k in keys_to_ignore:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
else:
UpperCAmelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
# remove and rename some keys of the original state dict before loading
UpperCAmelCase = checkpoint
remove_unused_keys(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load modified state_dict
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase = outputs.logits
UpperCAmelCase = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] )
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task on which the MobileViTV2 model you\'d like to convert was trained. '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
a__ : str = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 51
| 1
|
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    """Euclidean algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Extended Euclidean algorithm for the modular inverse of a modulo m."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
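# A quick sanity sketch (a hypothetical helper, added for illustration): any
# modular inverse must satisfy (a * inv) % m == 1, and Python's built-in
# pow(a, -1, m) (available since 3.8) gives an independent reference value.
def _mod_inverse_check(a: int, m: int) -> int:
    inv = find_mod_inverse(a, m)
    assert inv == pow(a, -1, m)
    assert (a * inv) % m == 1
    return inv


assert _mod_inverse_check(7, 26) == 15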
| 717
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: str = logging.get_logger(__name__)
UpperCAmelCase: Optional[Any] = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "wav2vec2"
def __init__( self ,UpperCAmelCase_=32 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-5 ,UpperCAmelCase_="group" ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) ,UpperCAmelCase_=(5, 2, 2, 2, 2, 2, 2) ,UpperCAmelCase_=(10, 3, 3, 3, 3, 2, 2) ,UpperCAmelCase_=False ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=16 ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,UpperCAmelCase_=0.05 ,UpperCAmelCase_=10 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=10 ,UpperCAmelCase_=0 ,UpperCAmelCase_=3_20 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=2_56 ,UpperCAmelCase_=2_56 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_="sum" ,UpperCAmelCase_=False ,UpperCAmelCase_=False ,UpperCAmelCase_=2_56 ,UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) ,UpperCAmelCase_=(5, 3, 3, 1, 1) ,UpperCAmelCase_=(1, 2, 3, 1, 1) ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2 ,UpperCAmelCase_=False ,UpperCAmelCase_=3 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ ,pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ )
_lowercase : List[Any] = hidden_size
_lowercase : Any = feat_extract_norm
_lowercase : Tuple = feat_extract_activation
_lowercase : Tuple = list(UpperCAmelCase_ )
_lowercase : List[str] = list(UpperCAmelCase_ )
_lowercase : List[Any] = list(UpperCAmelCase_ )
_lowercase : List[Any] = conv_bias
_lowercase : Optional[Any] = num_conv_pos_embeddings
_lowercase : Dict = num_conv_pos_embedding_groups
_lowercase : List[Any] = len(self.conv_dim )
_lowercase : str = num_hidden_layers
_lowercase : Any = intermediate_size
_lowercase : int = hidden_act
_lowercase : int = num_attention_heads
_lowercase : Union[str, Any] = hidden_dropout
_lowercase : Dict = attention_dropout
_lowercase : Tuple = activation_dropout
_lowercase : str = feat_proj_dropout
_lowercase : List[str] = final_dropout
_lowercase : Tuple = layerdrop
_lowercase : List[str] = layer_norm_eps
_lowercase : Any = initializer_range
_lowercase : Any = vocab_size
_lowercase : Optional[Any] = do_stable_layer_norm
_lowercase : Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase : Union[str, Any] = apply_spec_augment
_lowercase : Optional[Any] = mask_time_prob
_lowercase : Optional[int] = mask_time_length
_lowercase : Dict = mask_time_min_masks
_lowercase : Optional[int] = mask_feature_prob
_lowercase : Tuple = mask_feature_length
_lowercase : Optional[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowercase : str = num_codevectors_per_group
_lowercase : Union[str, Any] = num_codevector_groups
_lowercase : Optional[Any] = contrastive_logits_temperature
_lowercase : Tuple = feat_quantizer_dropout
_lowercase : Optional[int] = num_negatives
_lowercase : str = codevector_dim
_lowercase : Optional[int] = proj_codevector_dim
_lowercase : int = diversity_loss_weight
# ctc loss
_lowercase : Optional[int] = ctc_loss_reduction
_lowercase : str = ctc_zero_infinity
# adapter
_lowercase : str = add_adapter
_lowercase : List[str] = adapter_kernel_size
_lowercase : Any = adapter_stride
_lowercase : List[Any] = num_adapter_layers
_lowercase : Optional[Any] = output_hidden_size or hidden_size
_lowercase : str = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase : List[str] = list(UpperCAmelCase_ )
_lowercase : List[Any] = list(UpperCAmelCase_ )
_lowercase : Tuple = list(UpperCAmelCase_ )
_lowercase : List[Any] = xvector_output_dim
@property
def lowerCamelCase__ ( self ):
return functools.reduce(operator.mul ,self.conv_stride ,1 )
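# Quick check of the property above (it multiplies the conv strides together):
# with the default strides (5, 2, 2, 2, 2, 2, 2), the feature extractor
# downsamples the raw waveform by 5 * 2**6 == 320 samples per output frame.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320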
| 600
| 0
|
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Iterate the power tower base^base^..., keeping only the last `digits` digits."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
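# Cross-check sketch (hypothetical small values, added for illustration): the
# custom _modexpt should agree with Python's built-in three-argument pow.
assert _modexpt(3, 5, 10**4) == pow(3, 5, 10**4) == 243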
| 109
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    '''simple docstring'''

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 38
| 0
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 515
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
__UpperCamelCase : Optional[int] = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]' )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Returns the preprocessed text; identical to what was used when training the tokenizer."""
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(all_responses) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt)
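# Minimal usage sketch (model id taken from the map above; requires sentencepiece):
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tok.encode_fast("Träd är fina")          # -> list of ints
#   ids = tok.encode_fast("Träd är fina", "pt")    # -> torch.Tensor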
| 515
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """simple docstring"""

    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )

        image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
        assert np.abs((expected_image - image).max()) < 5e-2
| 200
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    '''simple docstring'''
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 200
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Rounds the latent grid up to full blocks, then rescales by the movq scale factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
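# Quick sketch of the rounding behaviour with the default scale_factor=8:
#   downscale_height_and_width(768, 768) -> (96, 96)   since 768 // 64 == 12 exactly
#   downscale_height_and_width(700, 700) -> (88, 88)   since 700 // 64 == 10 rounds up to 11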
class KandinskyV22Pipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 710
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
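# For a spark.range frame, each yielded example looks like ("<partition>_<row>", {"id": <int>}),
# e.g. ("0_0", {"id": 0}) for the first row of partition 0 (see the assertions above).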
| 687
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
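# Quick illustration of the renaming scheme (hypothetical key; offset of 1 assumed):
#   replace_key_with_offset("poolformer.encoder.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   -> "poolformer.encoder.block.1.3.output.conv1.weight"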
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 402
|
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: returns x for x > 0 and alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))
if __name__ == "__main__":
import doctest
doctest.testmod()
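# Quick sketch of the expected behaviour (values rounded):
#   exponential_linear_unit(np.array([2.0, -1.0]), alpha=1.0) -> [2.0, exp(-1) - 1 ≈ -0.632]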
| 516
| 0
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the ALBERT config
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
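    # Hypothetical invocation (script name and file paths are placeholders, not from the original source):
    #   python convert_albert_checkpoint.py \
    #     --tf_checkpoint_path ./albert/model.ckpt-best \
    #     --albert_config_file ./albert/albert_config.json \
    #     --pytorch_dump_path ./albert/pytorch_model.bin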
| 48
|
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """
    Class to contain the entire pipeline for the SHA-1 hashing algorithm
    """

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # left-rotate a 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
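    # Minimal self-check against the well-known SHA-1 test vector for b"abc":
    assert SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"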
| 48
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 144
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
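# Note: binding the _LazyModule into sys.modules defers the heavy torch/TF model
# imports above until an attribute such as GroupViTModel is first accessed.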
| 35
| 0
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : List[str]=3_2 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : int=3_7 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Union[str, Any]=5_1_2 , UpperCAmelCase__ : Optional[Any]=1_6 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Optional[int]=None , ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : int = use_token_type_ids
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : Optional[int] = num_choices
SCREAMING_SNAKE_CASE : Optional[int] = scope
def _lowercase ( self : Dict ) ->str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Any ) ->List[Any]:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowercase ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) ->Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE : str = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(
_lowercase , attention_mask=_lowercase , start_positions=_lowercase , end_positions=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Dict = DistilBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE : str = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = DistilBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Optional[int] = DistilBertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[int] = model(
_lowercase , attention_mask=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : int ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCAmelCase__ : str =(
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] =True
UpperCAmelCase__ : Any =True
UpperCAmelCase__ : Optional[Any] =True
UpperCAmelCase__ : Dict =True
def _lowercase ( self : Dict ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=_lowercase , dim=3_7 )
def _lowercase ( self : int ) ->Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_lowercase )
def _lowercase ( self : str ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowercase )
def _lowercase ( self : Any ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowercase )
def _lowercase ( self : int ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowercase )
def _lowercase ( self : str ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowercase )
def _lowercase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowercase )
@slow
def _lowercase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = DistilBertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@slow
@require_torch_gpu
def _lowercase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[int] = model_class(config=_lowercase )
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Dict = torch.jit.trace(
_lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowercase , os.path.join(_lowercase , """traced_model.pt""" ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.jit.load(os.path.join(_lowercase , """traced_model.pt""" ) , map_location=_lowercase )
loaded(inputs_dict["""input_ids"""].to(_lowercase ) , inputs_dict["""attention_mask"""].to(_lowercase ) )
@require_torch
class DistilBertModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : int ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
SCREAMING_SNAKE_CASE : Any = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowercase , attention_mask=_lowercase )[0]
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , _lowercase )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1e-4 ) )
| 703
|
def or_gate(input_a: int, input_b: int) -> int:
    """OR gate: the output is 1 when at least one input is 1, else 0."""
    return int((input_a, input_b).count(1) != 0)
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 446
| 0
|
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest( TestCase ):
'''simple docstring'''
def a__ (self ) -> int:
"""simple docstring"""
_a = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(A ):
_a = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(A ):
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ (self ) -> Any:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
def a__ (self ) -> str:
"""simple docstring"""
_a = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''' ) ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def a__ (self ) -> List[str]:
"""simple docstring"""
import PIL.Image
_a = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' , side_effect=A ) as mock_cast_to_python_objects:
_a = pa.array(TypedSequence([{'''path''': None, '''bytes''': b'''image_bytes'''}, pil_image] , type=Image() ) )
_a = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' , A )
self.assertFalse(kwargs['''optimize_list_casting'''] )
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = pa.BufferReader(_lowerCAmelCase) if isinstance(_lowerCAmelCase , pa.Buffer) else pa.memory_map(_lowerCAmelCase)
_a = pa.ipc.open_stream(_lowerCAmelCase)
_a = f.read_all()
assert len(pa_table.to_batches()) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10])
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}])
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = pa.BufferOutputStream()
_a = pa.schema(_lowerCAmelCase) if fields else None
with ArrowWriter(stream=_lowerCAmelCase , schema=_lowerCAmelCase , writer_batch_size=_lowerCAmelCase) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1})
writer.write({'''col_1''': '''bar''', '''col_2''': 2})
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_lowerCAmelCase , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def lowerCAmelCase ():
"""simple docstring"""
_a = pa.BufferOutputStream()
_a = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''])})
with ArrowWriter(stream=_lowerCAmelCase , features=_lowerCAmelCase) as writer:
writer.write({'''labels''': 0})
writer.write({'''labels''': 1})
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_a = pa.BufferReader(output.getvalue())
_a = pa.ipc.open_stream(_lowerCAmelCase)
_a = f.read_all()
_a = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_lowerCAmelCase)
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10])
def lowerCAmelCase (__A):
"""simple docstring"""
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=_lowerCAmelCase , writer_batch_size=_lowerCAmelCase , hash_salt='''split_name''' , check_duplicates=_lowerCAmelCase , ) as writer:
with pytest.raises(_lowerCAmelCase):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2])
_a = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10])
def lowerCAmelCase (__A):
"""simple docstring"""
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=_lowerCAmelCase , writer_batch_size=_lowerCAmelCase , hash_salt='''split_name''' , check_duplicates=_lowerCAmelCase , ) as writer:
with pytest.raises(_lowerCAmelCase):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10)
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10)
_a = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10])
def lowerCAmelCase (__A):
"""simple docstring"""
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=_lowerCAmelCase , writer_batch_size=_lowerCAmelCase , hash_salt='''split_name''' , check_duplicates=_lowerCAmelCase , ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1)
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2)
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10])
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}])
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = pa.BufferOutputStream()
_a = pa.schema(_lowerCAmelCase) if fields else None
with ArrowWriter(stream=_lowerCAmelCase , schema=_lowerCAmelCase , writer_batch_size=_lowerCAmelCase) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]})
writer.write_batch({'''col_1''': [], '''col_2''': []})
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_lowerCAmelCase , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10])
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}])
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = pa.BufferOutputStream()
_a = pa.schema(_lowerCAmelCase) if fields else None
with ArrowWriter(stream=_lowerCAmelCase , schema=_lowerCAmelCase , writer_batch_size=_lowerCAmelCase) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]}))
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_lowerCAmelCase , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10])
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}])
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = pa.BufferOutputStream()
_a = pa.schema(_lowerCAmelCase) if fields else None
with ArrowWriter(stream=_lowerCAmelCase , schema=_lowerCAmelCase , writer_batch_size=_lowerCAmelCase) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]}))
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]}))
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_lowerCAmelCase , metadata=writer._schema.metadata)
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def lowerCAmelCase ():
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a = {'col_1': pa.string(), 'col_2': pa.intaa()}
_a = os.path.join(_lowerCAmelCase , '''test.arrow''')
with ArrowWriter(path=_lowerCAmelCase , schema=pa.schema(_lowerCAmelCase)) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]})
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_lowerCAmelCase , metadata=writer._schema.metadata)
_check_output(_lowerCAmelCase , 1)
def lowerCAmelCase (__A):
"""simple docstring"""
if pa.types.is_list(_lowerCAmelCase):
return get_base_dtype(arr_type.value_type)
else:
return arr_type
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if isinstance(lst[0] , _lowerCAmelCase):
change_first_primitive_element_in_list(lst[0] , _lowerCAmelCase)
else:
_a = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32'''), pa.intaa())])
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
_a = pa.array(TypedSequence(_lowerCAmelCase , optimized_int_type=_lowerCAmelCase))
assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''' , [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
] , )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
_a = pa.array(OptimizedTypedSequence(_lowerCAmelCase , col=_lowerCAmelCase))
assert get_base_dtype(arr.type) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_a = copy.deepcopy(_lowerCAmelCase)
_a = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
change_first_primitive_element_in_list(_lowerCAmelCase , _lowerCAmelCase)
_a = pa.array(OptimizedTypedSequence(_lowerCAmelCase , col=_lowerCAmelCase))
assert get_base_dtype(arr.type) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''' , [False, True])
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = str(tmp_path / '''dataset-train.arrow''')
try:
with ArrowWriter(path=_lowerCAmelCase) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowerCAmelCase (__A):
"""simple docstring"""
_a = 'mock://dataset-train.arrow'
with ArrowWriter(path=_lowerCAmelCase , storage_options=mockfs.storage_options) as writer:
assert isinstance(writer._fs , type(_lowerCAmelCase))
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1})
writer.write({'''col_1''': '''bar''', '''col_2''': 2})
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_lowerCAmelCase)
def lowerCAmelCase ():
"""simple docstring"""
_a = pa.BufferOutputStream()
with ParquetWriter(stream=_lowerCAmelCase) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1})
writer.write({'''col_1''': '''bar''', '''col_2''': 2})
_a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_a = pa.BufferReader(output.getvalue())
_a = pq.read_table(_lowerCAmelCase)
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' , [False, True])
def lowerCAmelCase (__A , __A):
"""simple docstring"""
import PIL.Image
_a = str(tmp_path / '''test_image_rgb.jpg''')
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta)).save(_lowerCAmelCase , format='''png''')
_a = pa.BufferOutputStream()
with ParquetWriter(
stream=_lowerCAmelCase , features=Features({'''image''': Image()}) , embed_local_files=_lowerCAmelCase) as writer:
writer.write({'''image''': image_path})
writer.finalize()
_a = pa.BufferReader(output.getvalue())
_a = pq.read_table(_lowerCAmelCase)
_a = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''] , _lowerCAmelCase)
with open(_lowerCAmelCase , '''rb''') as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowerCAmelCase ():
"""simple docstring"""
_a = pa.schema([pa.field('''col_1''' , pa.string() , nullable=_lowerCAmelCase)])
_a = pa.BufferOutputStream()
with ArrowWriter(stream=_lowerCAmelCase) as writer:
writer._build_writer(inferred_schema=_lowerCAmelCase)
assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string())])
| 11
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_UpperCamelCase : Any = False
class VersatileDiffusionMegaPipelineFastTests( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : int = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ : Tuple = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : Any = generator.manual_seed(0 )
lowercase__ : List[Any] = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : Optional[int] = 'cyberpunk 2077'
lowercase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ : Dict = torch.manual_seed(0 )
lowercase__ : List[Any] = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
lowercase__ : Any = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase__ : List[str] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase__ : Any = 'A painting of a squirrel eating a burger '
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : str = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
lowercase__ : Optional[Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase__ : Optional[int] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase__ : List[Any] = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ : int = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase__ : Optional[int] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 599
| 0
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype('uint8')
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
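# Usage sketch (the checkpoint name is an assumption, not taken from this file):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("path/to/image.jpg")
#   outputs["depth"].save("depth.png")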
| 273
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Project Euler 116: count the ways a row of `length` black tiles can have
    some tiles replaced by oblong tiles of a single colour (red: length 2,
    green: length 3, blue: length 4), using at least one coloured tile."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F'{solution() = }')
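    # Worked example from the problem statement: a row of length 5 admits
    # 7 red, 3 green and 2 blue arrangements, 12 in total.
    assert solution(5) == 12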
| 273
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowercase : Any = LayoutLMTokenizer
lowercase : Optional[int] = LayoutLMTokenizerFast
lowercase : str = True
lowercase : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE ='''unwanted, running'''
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
| 255
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
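    # Quick numeric check (illustrative values; no image file is needed):
    # each 2x2 block of the 4x4 ramp keeps its maximum / truncated average.
    print(maxpooling(np.arange(16).reshape(4, 4), size=2, stride=2))  # [[ 5.  7.] [13. 15.]]
    print(avgpooling(np.arange(16).reshape(4, 4), size=2, stride=2))  # [[ 2.  4.] [10. 12.]]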
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 255
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}'


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # the main process gets one extra element so that padding is actually exercised
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # this check is written for exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """sum""")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'


def test_reduce_mean(state):
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """mean""")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'


def _mp_fn(index):
    # entrypoint expected by TPU launchers (name assumed from accelerate's test scripts)
    main()


def main():
    state = PartialState()
    state.print(f'State: {state}')
    state.print("""testing gather""")
    test_gather(state)
    state.print("""testing gather_object""")
    test_gather_object(state)
    state.print("""testing broadcast""")
    test_broadcast(state)
    state.print("""testing pad_across_processes""")
    test_pad_across_processes(state)
    state.print("""testing reduce_sum""")
    test_reduce_sum(state)
    state.print("""testing reduce_mean""")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
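# Launch sketch (invocation assumed, not part of this file): run the script under
# several processes so gather/broadcast/reduce cross real process boundaries, e.g.
#   accelerate launch --num_processes 2 this_script.py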
| 80
|
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('.')
| 80
| 1
|
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""")
    if input_num <= 0:
        raise ValueError("""Input must be positive""")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
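    # Illustrative check: 6 is perfect, so its proper divisors 1 + 2 + 3 sum back to 6.
    print(sum_of_proper_divisors(6))  # 6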
| 21
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 207
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : str="</s>" , lowerCamelCase__ : List[Any]="</s>" , lowerCamelCase__ : List[Any]="<s>" , lowerCamelCase__ : List[Any]="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : int="<mask>" , lowerCamelCase__ : int=["<s>NOTUSED", "</s>NOTUSED"] , lowerCamelCase__ : Optional[Dict[str, Any]] = None , **lowerCamelCase__ : Optional[int] , ) -> None:
"""simple docstring"""
__lowercase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
__lowercase = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
__lowercase = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
__lowercase = len(self.fairseq_tokens_to_ids )
__lowercase = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def UpperCAmelCase_ ( self : str , lowerCamelCase__ : str ) -> Any:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(lowerCamelCase__ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Optional[Any] ) -> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : int ) -> Any:
"""simple docstring"""
__lowercase = []
__lowercase = ''''''
__lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
__lowercase = True
__lowercase = []
else:
current_sub_tokens.append(lowerCamelCase__ )
__lowercase = False
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def __getstate__( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : Any , lowerCamelCase__ : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 703
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """
    Greedy solution to the fractional knapsack problem: take items in order of
    decreasing value/weight ratio and split the last item if necessary.

    >>> fractional_knapsack([1, 3, 5, 7, 9], [0.9, 0.7, 0.5, 0.3, 0.1], 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
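    # Hedged demo (not part of the original file): with capacity 50 only two whole
    # items fit, so the greedy solution takes a 2/3 fraction of the third item.
    best, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(best, fractions)  # 240.0 [1, 1, 0.6666666666666666]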
| 362
| 0
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set a parameter or buffer of `module` on `device`, quantizing it with bitsandbytes when needed."""
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
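# Hedged sketch (not part of the original module): on the plain, non-quantized
# path the helper simply swaps a parameter in place, so it can be tried on CPU:
#
#     layer = nn.Linear(2, 2)
#     set_module_quantized_tensor_to_device(layer, "weight", "cpu", value=torch.zeros(2, 2))
#     assert layer.weight.sum().item() == 0.0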
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """Recursively replace `nn.Linear`/`Conv1D` children with bitsandbytes quantized layers."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public entry point: quantize every eligible linear layer of `model` with bitsandbytes."""
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
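# Hedged usage sketch (not part of the original module): `BitsAndBytesConfig` comes
# from transformers, and a CUDA device plus the `bitsandbytes` package are assumed.
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     model = replace_with_bnb_linear(model, quantization_config=bnb_config)
#     # every nn.Linear / Conv1D except `lm_head` is now a bnb.nn.Linear8bitLt shell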
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    """Return the names of modules (e.g. a tied output head) that should stay in full precision."""
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
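# Hedged example (not part of the original module): for a causal LM whose output
# head is tied to the input embeddings, the helper typically returns the head name.
#
#     from transformers import AutoModelForCausalLM
#
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     print(get_keys_to_not_convert(model))  # e.g. ['lm_head']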
| 532
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 532
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
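# Hedged illustration (not part of the original file): with the default strides
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples by 5 * 2**6 = 320, i.e.
# one frame of logits per 320 waveform samples (20 ms of 16 kHz audio):
#
#     config = WavLMConfig()
#     assert config.inputs_to_logits_ratio == 320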
| 708
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class holding the rendered images (or latents) produced by the pipeline."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior, image_encoder, image_processor, scheduler, renderer):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into
            # a single batch to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt=1,
        num_inference_steps=25,
        generator=None,
        latents=None,
        guidance_scale=4.0,
        frame_size=64,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 549
| 0
|
def longest_common_subsequence(x: str, y: str):
    """
    Find the longest common subsequence of two strings, returning its length and
    one such subsequence.

    >>> longest_common_subsequence("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
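    # Hedged check (not in the original script): the backtracking step should
    # reproduce the classic textbook answer for this pair of strings.
    assert (ln, subseq) == (expected_ln, expected_subseq)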
| 68
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1_280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main():
    """
    Get images list and annotations list from the input dir, build mosaic images,
    and save the new images with their annotations to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir, img_dir):
    """Walk the label dir, parse YOLO-format annotations, and pair them with image paths."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """Combine four images and their annotations into one mosaic image with rescaled bboxes."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
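# Hedged worked example (not part of the original script): in the top-right tile
# with scale_x = 0.5, a relative bbox x-coordinate of 0.2 is remapped to
# 0.5 + 0.2 * (1 - 0.5) = 0.6, so mosaic coordinates always stay inside [0, 1].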
def random_chars(number_char):
    """Generate a random string of lowercase letters and digits of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 631
| 0
|
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """
    Sort a list in place with the gnome sort algorithm and return it.

    >>> gnome_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
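    # Hedged extra check (not part of the original script): gnome sort works
    # in place, so the returned list is the very object that was passed in.
    data = [3, 1, 2]
    assert gnome_sort(data) is data and data == [1, 2, 3]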
| 257
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration class for I-BERT (integer-only BERT) models."""

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
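# Hedged usage sketch (not part of the original file): the ONNX input spec for the
# default task marks batch and sequence as dynamic axes.
#
#     onnx_config = IBertOnnxConfig(IBertConfig())
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])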
| 257
| 1
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """Construct a GPT-SW3 tokenizer backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts.
        # In the original source these are twelve distinct Unicode space and
        # zero-width characters; they were flattened to plain spaces in this copy.
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """
        Returns the preprocessed text; this procedure is identical to what was used when training the tokenizer.
        """
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; this function is overridden to remove the default clean-up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) back into a single string; special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """
        Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
        functionality but is often much faster.
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids back to text using the raw SP tokenizer, skipping the slow framework path."""
        return self.sp_model.decode(token_ids)

    def build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation by joining user and bot turns with BOS tokens."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
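# Hedged usage sketch (not part of the original file; assumes a local GPT-SW3
# SentencePiece model file):
#
#     tok = GPTSw3Tokenizer("spiece.model")
#     ids = tok.encode_fast("Träd är fina")
#     assert tok.decode_fast(ids) == "Träd är fina"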
| 234
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
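# Hedged standalone sketch (not part of the test file): the decorator halves the
# batch size after every CUDA out-of-memory error until the wrapped function
# finishes without raising.
#
#     @find_executable_batch_size(starting_batch_size=32)
#     def train(batch_size):
#         print("trying", batch_size)
#
#     train()  # prints "trying 32" once, since nothing raises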
| 234
| 1
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    """Map a TF pegasus checkpoint key onto the corresponding Hugging Face state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration model and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Load every non-optimizer variable of a TF checkpoint into a name -> numpy array dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    """Convert a pegasus TF checkpoint plus tokenizer into a saved PyTorch model directory."""
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 714
|
def triangle_number_generator():
    """Generate the triangle number series."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number that has more than five hundred divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
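    # Hedged worked example (not in the original solution): 28 = 1+2+3+4+5+6+7 is
    # the first triangle number with more than five divisors (1, 2, 4, 7, 14, 28).
    assert count_divisors(28) == 6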
| 620
| 0
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """
    Check the substring-divisibility property of Project Euler 43 for a digit tuple.

    >>> is_substring_divisible((0, 1, 2, 4, 6, 5, 7, 3, 8, 9))
    False
    """
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the required substring divisibility."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
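    # Hedged check (not part of the original solution): 1406357289 is the example
    # from the problem statement and passes every substring-divisibility test.
    assert is_substring_divisible(tuple(int(d) for d in "1406357289"))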
| 602
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : List[Any] , __UpperCamelCase : str = "▁" , __UpperCamelCase : bool = True , __UpperCamelCase : Union[str, AddedToken] = "<unk>" , __UpperCamelCase : Union[str, AddedToken] = "</s>" , __UpperCamelCase : Union[str, AddedToken] = "<pad>" , )->List[Any]:
_UpperCAmelCase = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
_UpperCAmelCase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_UpperCAmelCase = token_dict['''token''']
_UpperCAmelCase = Tokenizer(Unigram() )
_UpperCAmelCase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
normalizers.Lowercase(),
] )
_UpperCAmelCase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCamelCase , add_prefix_space=__UpperCamelCase ),
pre_tokenizers.Digits(individual_digits=__UpperCamelCase ),
pre_tokenizers.Punctuation(),
] )
_UpperCAmelCase = decoders.Metaspace(replacement=__UpperCamelCase , add_prefix_space=__UpperCamelCase )
_UpperCAmelCase = TemplateProcessing(
single=F'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
_UpperCAmelCase = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : int , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 8_0_0_0 , __UpperCamelCase : bool = True , )->Any:
_UpperCAmelCase = trainers.UnigramTrainer(
vocab_size=__UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCamelCase , )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = [files]
self._tokenizer.train(__UpperCamelCase , trainer=__UpperCamelCase )
self.add_unk_id()
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Union[Iterator[str], Iterator[Iterator[str]]] , __UpperCamelCase : int = 8_0_0_0 , __UpperCamelCase : bool = True , )->int:
_UpperCAmelCase = trainers.UnigramTrainer(
vocab_size=__UpperCamelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCamelCase , )
self._tokenizer.train_from_iterator(__UpperCamelCase , trainer=__UpperCamelCase )
self.add_unk_id()
def lowercase__ ( self : int )->Dict:
_UpperCAmelCase = json.loads(self._tokenizer.to_str() )
_UpperCAmelCase = self.special_tokens['''unk''']['''id''']
_UpperCAmelCase = Tokenizer.from_str(json.dumps(__UpperCamelCase ) )
| 602
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = 'mra'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
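# Usage sketch (added for illustration): the defaults above mirror the
# uw-madison/mra-base-512-4 checkpoint family referenced in the config map.
if __name__ == "__main__":
    config = MraConfig()
    print(config.model_type, config.hidden_size, config.max_position_embeddings)  # mra 768 512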
| 405
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( _a):
_UpperCAmelCase : Optional[int] = ['image_processor', 'tokenizer']
_UpperCAmelCase : str = 'Pix2StructImageProcessor'
_UpperCAmelCase : Any = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : Optional[int] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Tuple ):
UpperCAmelCase = False
super().__init__(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def __call__( self : Any ,__SCREAMING_SNAKE_CASE : Optional[Any]=None ,__SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,__SCREAMING_SNAKE_CASE : bool = True ,__SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False ,__SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None ,__SCREAMING_SNAKE_CASE : Optional[int] = None ,__SCREAMING_SNAKE_CASE : Optional[int] = 2_0_4_8 ,__SCREAMING_SNAKE_CASE : int = 0 ,__SCREAMING_SNAKE_CASE : Optional[int] = None ,__SCREAMING_SNAKE_CASE : Optional[bool] = None ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = True ,__SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None ,**__SCREAMING_SNAKE_CASE : Union[str, Any] ,):
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCAmelCase = self.tokenizer
UpperCAmelCase = self.tokenizer(
text=__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE ,padding=__SCREAMING_SNAKE_CASE ,truncation=__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,stride=__SCREAMING_SNAKE_CASE ,pad_to_multiple_of=__SCREAMING_SNAKE_CASE ,return_attention_mask=__SCREAMING_SNAKE_CASE ,return_overflowing_tokens=__SCREAMING_SNAKE_CASE ,return_special_tokens_mask=__SCREAMING_SNAKE_CASE ,return_offsets_mapping=__SCREAMING_SNAKE_CASE ,return_token_type_ids=__SCREAMING_SNAKE_CASE ,return_length=__SCREAMING_SNAKE_CASE ,verbose=__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,max_patches=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
else:
# add pixel_values and bbox
UpperCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,max_patches=__SCREAMING_SNAKE_CASE ,header_text=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if text is not None and not self.image_processor.is_vqa:
UpperCAmelCase = self.tokenizer(
text=__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE ,padding=__SCREAMING_SNAKE_CASE ,truncation=__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,stride=__SCREAMING_SNAKE_CASE ,pad_to_multiple_of=__SCREAMING_SNAKE_CASE ,return_attention_mask=__SCREAMING_SNAKE_CASE ,return_overflowing_tokens=__SCREAMING_SNAKE_CASE ,return_special_tokens_mask=__SCREAMING_SNAKE_CASE ,return_offsets_mapping=__SCREAMING_SNAKE_CASE ,return_token_type_ids=__SCREAMING_SNAKE_CASE ,return_length=__SCREAMING_SNAKE_CASE ,verbose=__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
if "attention_mask" in text_encoding:
UpperCAmelCase = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
UpperCAmelCase = text_encoding.pop("input_ids" )
else:
UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(__SCREAMING_SNAKE_CASE )
return encoding_image_processor
def _UpperCAmelCase ( self : Union[str, Any] ,*__SCREAMING_SNAKE_CASE : Union[str, Any] ,**__SCREAMING_SNAKE_CASE : str ):
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[str] ,*__SCREAMING_SNAKE_CASE : Optional[int] ,**__SCREAMING_SNAKE_CASE : Dict ):
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
@property
def _UpperCAmelCase ( self : List[Any] ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 405
| 1
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowercase ( nn.Module ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = jnp.floataa
def _UpperCamelCase ( self : int ):
"""simple docstring"""
lowerCamelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ = hidden_states.shape
lowerCamelCase__ = jax.image.resize(
_A , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
lowerCamelCase__ = self.conv(_A )
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = jnp.floataa
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
lowerCamelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[str] , a_ : List[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.conv(_A )
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = None
snake_case_ = 0.0
snake_case_ = None
snake_case_ = jnp.floataa
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ = self.in_channels if self.out_channels is None else self.out_channels
lowerCamelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ = nn.Dense(_A , dtype=self.dtype )
lowerCamelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ = nn.Dropout(self.dropout_prob )
lowerCamelCase__ = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase__ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowerCamelCase__ = None
if use_nin_shortcut:
lowerCamelCase__ = nn.Conv(
_A , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self : List[Any] , a_ : Dict , a_ : List[str] , a_ : Any=True ):
"""simple docstring"""
lowerCamelCase__ = hidden_states
lowerCamelCase__ = self.norma(_A )
lowerCamelCase__ = nn.swish(_A )
lowerCamelCase__ = self.conva(_A )
lowerCamelCase__ = self.time_emb_proj(nn.swish(_A ) )
lowerCamelCase__ = jnp.expand_dims(jnp.expand_dims(_A , 1 ) , 1 )
lowerCamelCase__ = hidden_states + temb
lowerCamelCase__ = self.norma(_A )
lowerCamelCase__ = nn.swish(_A )
lowerCamelCase__ = self.dropout(_A , _A )
lowerCamelCase__ = self.conva(_A )
if self.conv_shortcut is not None:
lowerCamelCase__ = self.conv_shortcut(_A )
return hidden_states + residual
| 165
|
import os
def largest_product(grid) -> int:
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


def solution() -> int:
    grid = []
    with open(os.path.dirname(__file__) + '/grid.txt') as file:
        for line in file:
            grid.append(line.strip('\n').split(' '))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
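    # Worked check on a small grid (added for illustration; the real input comes
    # from grid.txt): the best run of four here is the bottom row, 13*14*15*16.
    assert largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) == 43680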
| 239
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(
    dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
    """For every row of value_array, find the nearest row of dataset."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # Keep the smallest distance seen so far and its vector.
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
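    # Minimal usage sketch (added; the arrays are illustrative assumptions): each
    # query row of value_array is matched to the nearest dataset row.
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.9, 1.1]])
    print(similarity_search(dataset, value_array))  # [[[1.0, 1.0], 0.1414...]]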
| 344
|
| 344
| 1
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_files(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["""copies"""], 2)
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""], True)
| 585
|
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 216
| 0
|
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    """Count the expansions of sqrt(2) whose numerator has more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"{solution() = }")
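    # Worked check (added for illustration): the expansions run 3/2, 7/5, 17/12,
    # 41/29, 99/70, 239/169, 577/408, 1393/985, so the eighth is the first whose
    # numerator has more digits than its denominator.
    assert solution(8) == 1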
| 706
|
"""simple docstring"""
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("""only integers accepted as input""")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for _ in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__('''doctest''').testmod()
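    # Usage sketch (added, not in the original file): deleting one digit from 123
    # yields 23, 13 or 12, so the largest achievable value is 23.
    assert remove_digit(123) == 23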
| 635
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"""ffn.experts.expert_{expert_idx}""")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert_idx=expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, expert_idx=None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--nllb_moe_checkpoint_path''',
        default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
        type=str,
        required=False,
        help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
    )
    parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
        type=str,
        required=False,
        help='''Path to the output pytorch model.''',
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print('''Done''')
    model.save_pretrained(args.pytorch_dump_folder_path)
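# Example invocation (added for illustration; the script name is an assumption and
# both paths are placeholders):
#   python convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py \
#       --nllb_moe_checkpoint_path /path/to/model_moe_54b/checkpoint_2_300000 \
#       --pytorch_dump_folder_path /path/to/hf-converted-moe \
#       --dtype float32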
| 46
|
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
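    # Worked examples (added for illustration): 217 -> 2*1*7=14 -> 1*4=4 takes two
    # multiplicative steps; 199 -> 19 -> 10 -> 1 takes three additive steps.
    assert multiplicative_persistence(217) == 2
    assert additive_persistence(199) == 3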
| 89
| 0
|
from __future__ import annotations
class __magic_name__ :
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:list[list[int]] ):
snake_case__ = TypeError(
'''Matrices must be formed from a list of zero or more lists containing at '''
'''least one and the same number of values, each of which must be of type '''
'''int or float.''' )
if len(_a ) != 0:
snake_case__ = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(_a ) != cols:
raise error
for value in row:
if not isinstance(_a , (int, float) ):
raise error
snake_case__ = rows
else:
snake_case__ = []
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return len(self.rows )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return len(self.rows[0] )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return (self.num_rows, self.num_columns)
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return self.order[0] == self.order[1]
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return bool(self.determinant() )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:int , _a:int ):
snake_case__ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(_a ).determinant()
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:int , _a:int ):
if (row + column) % 2 == 0:
return self.get_minor(_a , _a )
return -1 * self.get_minor(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
return Matrix(
[
[self.get_minor(_a , _a ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
return self.adjugate() * (1 / determinant)
def __repr__( self:Any ):
return str(self.rows )
def __str__( self:int ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(_a ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:list[int] , _a:int | None = None ):
snake_case__ = TypeError('''Row must be a list containing all ints and/or floats''' )
if not isinstance(_a , _a ):
raise type_error
for value in row:
if not isinstance(_a , (int, float) ):
raise type_error
if len(_a ) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''' )
if position is None:
self.rows.append(_a )
else:
snake_case__ = self.rows[0:position] + [row] + self.rows[position:]
def SCREAMING_SNAKE_CASE__ ( self:str , _a:list[int] , _a:int | None = None ):
snake_case__ = TypeError(
'''Column must be a list containing all ints and/or floats''' )
if not isinstance(_a , _a ):
raise type_error
for value in column:
if not isinstance(_a , (int, float) ):
raise type_error
if len(_a ) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''' )
if position is None:
snake_case__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
snake_case__ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self:Any , _a:object ):
if not isinstance(_a , _a ):
return NotImplemented
return self.rows == other.rows
def __ne__( self:int , _a:object ):
return not self == other
def __neg__( self:int ):
return self * -1
def __add__( self:Dict , _a:Matrix ):
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self:Any , _a:Matrix ):
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self:List[Any] , _a:Matrix | int | float ):
if isinstance(_a , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(_a , _a ):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''' )
return Matrix(
[
[Matrix.dot_product(_a , _a ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''' )
def __pow__( self:Tuple , _a:int ):
if not isinstance(_a , _a ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
snake_case__ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls:List[str] , _a:list[int] , _a:list[int] ):
return sum(row[i] * column[i] for i in range(len(_a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : int = ['pixel_values']
def __init__( self:List[str] , _a:bool = True , _a:Dict[str, int] = None , _a:PILImageResampling = PILImageResampling.BICUBIC , _a:bool = True , _a:Dict[str, int] = None , _a:bool = True , _a:Union[int, float] = 1 / 2_55 , _a:bool = True , _a:Optional[Union[float, List[float]]] = None , _a:Optional[Union[float, List[float]]] = None , _a:bool = True , **_a:Union[str, Any] , ):
super().__init__(**_a )
snake_case__ = size if size is not None else {'''shortest_edge''': 2_24}
snake_case__ = get_size_dict(_a , default_to_square=_a )
snake_case__ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
snake_case__ = get_size_dict(_a , default_to_square=_a , param_name='''crop_size''' )
snake_case__ = do_resize
snake_case__ = size
snake_case__ = resample
snake_case__ = do_center_crop
snake_case__ = crop_size
snake_case__ = do_rescale
snake_case__ = rescale_factor
snake_case__ = do_normalize
snake_case__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case__ = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case__ = do_convert_rgb
def SCREAMING_SNAKE_CASE__ ( self:str , _a:np.ndarray , _a:Dict[str, int] , _a:PILImageResampling = PILImageResampling.BICUBIC , _a:Optional[Union[str, ChannelDimension]] = None , **_a:str , ):
snake_case__ = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case__ = get_resize_output_image_size(_a , size=size['''shortest_edge'''] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:np.ndarray , _a:Dict[str, int] , _a:Optional[Union[str, ChannelDimension]] = None , **_a:Any , ):
snake_case__ = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:np.ndarray , _a:Union[int, float] , _a:Optional[Union[str, ChannelDimension]] = None , **_a:List[Any] , ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:np.ndarray , _a:Union[float, List[float]] , _a:Union[float, List[float]] , _a:Optional[Union[str, ChannelDimension]] = None , **_a:Tuple , ):
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:ImageInput , _a:bool = None , _a:Dict[str, int] = None , _a:PILImageResampling = None , _a:bool = None , _a:int = None , _a:bool = None , _a:float = None , _a:bool = None , _a:Optional[Union[float, List[float]]] = None , _a:Optional[Union[float, List[float]]] = None , _a:bool = None , _a:Optional[Union[str, TensorType]] = None , _a:Optional[ChannelDimension] = ChannelDimension.FIRST , **_a:Any , ):
snake_case__ = do_resize if do_resize is not None else self.do_resize
snake_case__ = size if size is not None else self.size
snake_case__ = get_size_dict(_a , param_name='''size''' , default_to_square=_a )
snake_case__ = resample if resample is not None else self.resample
snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ = crop_size if crop_size is not None else self.crop_size
snake_case__ = get_size_dict(_a , param_name='''crop_size''' , default_to_square=_a )
snake_case__ = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ = image_mean if image_mean is not None else self.image_mean
snake_case__ = image_std if image_std is not None else self.image_std
snake_case__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case__ = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case__ = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
snake_case__ = [to_numpy_array(_a ) for image in images]
if do_resize:
snake_case__ = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
snake_case__ = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
snake_case__ = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
snake_case__ = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
snake_case__ = [to_channel_dimension_format(_a , _a ) for image in images]
snake_case__ = {'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )
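# Usage sketch (added for illustration; it relies on the public transformers class
# that this dump corresponds to, not the mangled class name above): the preprocess
# pipeline resizes to the shortest edge, centre-crops to 224x224, rescales by
# 1/255 and normalizes with the CLIP mean/std.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPImageProcessor
    processor = CLIPImageProcessor()
    batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
    print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])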
| 208
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
_SCREAMING_SNAKE_CASE : Optional[Any] = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = 42
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
super().__init__()
self.register_modules(
prior=__snake_case , image_encoder=__snake_case , image_processor=__snake_case , scheduler=__snake_case , renderer=__snake_case , )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
if latents is None:
snake_case = randn_tensor(__snake_case , generator=__snake_case , device=__snake_case , dtype=__snake_case )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
snake_case = latents.to(__snake_case )
snake_case = latents * scheduler.init_noise_sigma
return latents
def a_ ( self , __snake_case=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case = torch.device(F'''cuda:{gpu_id}''' )
snake_case = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__snake_case , __snake_case )
@property
def a_ ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__snake_case , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , ):
if isinstance(__snake_case , __snake_case ) and isinstance(image[0] , torch.Tensor ):
snake_case = torch.cat(__snake_case , axis=0 ) if image[0].ndim == 4 else torch.stack(__snake_case , axis=0 )
if not isinstance(__snake_case , torch.Tensor ):
snake_case = self.image_processor(__snake_case , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
snake_case = image.to(dtype=self.image_encoder.dtype , device=__snake_case )
snake_case = self.image_encoder(__snake_case )['''last_hidden_state''']
snake_case = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
snake_case = image_embeds.repeat_interleave(__snake_case , dim=0 )
if do_classifier_free_guidance:
snake_case = torch.zeros_like(__snake_case )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__snake_case )
def __call__( self , __snake_case , __snake_case = 1 , __snake_case = 2_5 , __snake_case = None , __snake_case = None , __snake_case = 4.0 , __snake_case = 6_4 , __snake_case = "pil" , __snake_case = True , ):
if isinstance(__snake_case , PIL.Image.Image ):
snake_case = 1
elif isinstance(__snake_case , torch.Tensor ):
snake_case = image.shape[0]
elif isinstance(__snake_case , __snake_case ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
snake_case = len(__snake_case )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__snake_case )}''' )
snake_case = self._execution_device
snake_case = batch_size * num_images_per_prompt
snake_case = guidance_scale > 1.0
snake_case = self._encode_image(__snake_case , __snake_case , __snake_case , __snake_case )
# prior
self.scheduler.set_timesteps(__snake_case , device=__snake_case )
snake_case = self.scheduler.timesteps
snake_case = self.prior.config.num_embeddings
snake_case = self.prior.config.embedding_dim
snake_case = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __snake_case , __snake_case , __snake_case , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
snake_case = latents.reshape(latents.shape[0] , __snake_case , __snake_case )
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the latents if we are doing classifier free guidance
snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case = self.scheduler.scale_model_input(__snake_case , __snake_case )
snake_case = self.prior(
__snake_case , timestep=__snake_case , proj_embedding=__snake_case , ).predicted_image_embedding
# remove the variance
snake_case , snake_case = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
snake_case , snake_case = noise_pred.chunk(2 )
snake_case = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
snake_case = self.scheduler.step(
__snake_case , timestep=__snake_case , sample=__snake_case , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__snake_case )
snake_case = []
for i, latent in enumerate(__snake_case ):
print()
snake_case = self.renderer.decode(
latent[None, :] , __snake_case , size=__snake_case , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(__snake_case )
snake_case = torch.stack(__snake_case )
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
snake_case = images.cpu().numpy()
if output_type == "pil":
snake_case = [self.numpy_to_pil(__snake_case ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__snake_case )
| 550
|
from math import factorial
class Dual:
    """A dual number with higher-order infinitesimal parts, for automatic differentiation."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f'''{self.real}+'''
            f'''{"+".join(str(dual) + "E" + str(n + 1) for n, dual in enumerate(self.duals))}'''
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('''power must be a positive integer''')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the order-th derivative of func at position, via dual numbers."""
    if not callable(func):
        raise ValueError('''differentiate() requires a function as input for func''')
    if not isinstance(position, (float, int)):
        raise ValueError('''differentiate() requires a float as input for position''')
    if not isinstance(order, int):
        raise ValueError('''differentiate() requires an int as input for order''')
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
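    # Extra worked example (added for illustration): for g(x) = x**3 at x = 2 the
    # first derivative is 3 * 2**2 = 12.
    print(differentiate(lambda x: x**3, 2, 1))  # 12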
| 550
| 1
|
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
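    # Usage sketch (added for illustration): 2 is the single most frequent value;
    # a tie returns every most frequent value, sorted.
    print(mode([1, 2, 2, 3]))  # [2]
    print(mode([1, 1, 2, 2, 3]))  # [1, 2]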
| 702
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
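# Hypothetical launch command (the script filename is an assumption; flags mirror the argparse options above):
#   accelerate launch cv_example.py --data_dir ./pets --mixed_precision fp16 --checkpointing_steps epoch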
| 290
| 0
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask, i.e. the mask implied by the amino-acid sequence."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
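# Minimal sketch (shapes are assumptions) of building a single-residue Protein and
# serializing it with the helpers above:
#   prot = Protein(
#       atom_positions=np.zeros((1, residue_constants.atom_type_num, 3)),
#       aatype=np.zeros((1,), dtype=np.int32),
#       atom_mask=np.ones((1, residue_constants.atom_type_num)),
#       residue_index=np.array([1]),
#       b_factors=np.zeros((1, residue_constants.atom_type_num)),
#   )
#   pdb_str = to_pdb(prot)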
| 206
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
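# Hypothetical invocation (flag names assumed from the InitializationArguments fields read above):
#   python initialize_model.py --config_name gpt2-large --tokenizer_name codeparrot/codeparrot --model_name my-codeparrot-init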
| 206
| 1
|
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return the logarithmically scaled inverse document frequency, optionally smoothed."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
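# Worked example (corpus assumed): if a term appears in df = 1 of n = 10 documents,
# inverse_document_frequency(1, 10) == round(log10(10 / 1), 3) == 1.0, and with a
# term frequency of 3, tf_idf(3, 1.0) == 3.0.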
| 720
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
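# Minimal sketch: expose the last two backbone stages.
#   config = MaskFormerSwinConfig(out_features=["stage3", "stage4"])
#   assert config.hidden_size == 96 * 2 ** 3  # 768 with the default embed_dim/depths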
| 80
| 0
|
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
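# e.g. to_2tuple(224) -> (224, 224), while an iterable such as (224, 196) passes through unchanged.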
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
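# Note: logits_per_image has shape (num_images, num_texts) = (1, 2) here, so the
# expected_logits array above scores the single COCO image against both Italian captions.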
| 95
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Finds the best answer spans for one passage, keeping only non-overlapping spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
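# Minimal sketch (checkpoint name taken from the maps above; the text values are placeholders):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="What is love?", titles="Haddaway", texts="'What Is Love' is a song...")
#   # encoded["input_ids"] has shape (n_passages, sequence_length), as described in the docstring.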
| 698
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
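# With the lazy module installed in sys.modules, importing this package stays cheap:
# the torch-backed modeling module is only loaded on first attribute access.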
| 714
|
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
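# e.g. CircularQueue(2).enqueue("a").enqueue("b").dequeue() returns "a", and the freed
# slot is reused by the next enqueue thanks to the modulo wrap-around of front/rear.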
| 280
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 675
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """A fast RoFormer tokenizer (backed by HuggingFace's tokenizers library) that
    swaps in a Jieba-based pre-tokenizer for Chinese word segmentation."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer cannot be pickled; swap in the default
        # BERT pre-tokenizer before serializing.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # The Jieba pre-tokenizer cannot be serialized either; fall back to the
        # BERT pre-tokenizer for the on-disk copy.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
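# Illustrative layout for the two sequence-pair helpers above (token ids 5, 6
# and 8 are placeholders, not real vocab entries):
# build_inputs_with_special_tokens([5, 6], [8])       -> [CLS] 5 6 [SEP] 8 [SEP]
# create_token_type_ids_from_sequences([5, 6], [8])   -> [0, 0, 0, 0, 1, 1]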
| 675
| 1
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Check whether the code point falls in a CJK character block."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
def is_chinese(word):
    """Return 1 if every character in `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """Collect the multi-character Chinese words appearing in `tokens`."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix BERT sub-tokens with '##' when they continue a whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
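# Worked example (illustrative): with bert_tokens ["中", "国", "人"] and the
# LTP-derived word set {"中国"}, add_sub_symbol returns ["中", "##国", "人"]:
# "国" is marked as the continuation of the whole word "中国", while "人"
# stays a single-character word.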
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """Build whole-word-masking reference ids by aligning LTP word segmentation
    with BERT sub-tokenization, processing the input in chunks of 100 lines."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
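# Example invocation (illustrative; the script name and paths are placeholders):
#   python run_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert bert-base-chinese --save_path ./resources/ref.txt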
| 180
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''', ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''', ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''', [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
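# Illustrative expected value (spaces are percent-encoded by `quote`):
# hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
# -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"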
| 180
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , "do_resize" ) )
self.assertTrue(hasattr(__magic_name__ , "size" ) )
self.assertTrue(hasattr(__magic_name__ , "do_center_crop" ) )
self.assertTrue(hasattr(__magic_name__ , "center_crop" ) )
self.assertTrue(hasattr(__magic_name__ , "do_flip_channel_order" ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
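# Note (assumption worth flagging): do_flip_channel_order=True makes the
# processor emit BGR-ordered tensors, mirroring the channel order the original
# MobileViT checkpoints were trained with; that is why the tester enables it
# by default alongside resizing and center-cropping.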
| 48
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or
        PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> Any:
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(_UpperCamelCase , "center_crop" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_convert_rgb" ) )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __a ( self ) -> str:
pass
def __a ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __a ( self ) -> str:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __a ( self ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(_UpperCamelCase , "center_crop" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_convert_rgb" ) )
def __a ( self ) -> int:
pass
def __a ( self ) -> str:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
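# Note on the four-channel variant above: with do_convert_rgb=True the
# processor collapses RGBA inputs down to three channels, so even though the
# tester generates num_channels=4 images, the encoded batches are expected to
# come out with expected_encoded_image_num_channels == 3.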
| 290
| 0
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
snake_case = get_args()
main(args)
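# Example end-to-end invocation (illustrative model id and paths):
#   python eval_rag.py --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token --evaluation_set test.source \
#       --gold_data_path gold_data --predictions_path preds.txt --eval_mode e2e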
| 701
|
UNIVERSAL_GAS_CONSTANT = 8.3_144_598  # J/(mol*K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen gas (N2) at 300 K; the formula expects kg/mol,
    # so 28 g/mol is written as 28e-3
    temperature = 300
    molar_mass = 28e-3
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 535
| 0
|
"""simple docstring"""
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a (possibly unbalanced) BST from the input values.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse the BST in order to read the values back sorted.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
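# Complexity note (illustrative): tree_sort is O(n log n) on average, but an
# already-sorted input such as [1, 2, 3, 4] degrades it to O(n^2), since every
# insert walks to the right-most node of an unbalanced chain.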
| 4
|
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of the qualifying triangles whose perimeter does not
    exceed `max_perimeter`, generated by the integer recurrence below."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 582
| 0
|
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate plain text into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate Morse code back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
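# Round-trip sketch: encrypt("SOS") -> "... --- ..." and
# decrypt("... --- ...") -> "SOS"; spaces in the plaintext are encoded as "/"
# and recovered on decode.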
| 703
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
a__ : str = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
a__ : str = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
a__ : int = floats_tensor((self.batch_size, self.seq_length, 1) )
a__ : Tuple = floats_tensor((self.batch_size, self.seq_length, 1) )
a__ : List[Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
a__ : str = random_attention_mask((self.batch_size, self.seq_length) )
a__ : Tuple = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCAmelCase ( self : Tuple , a_ : List[str] , a_ : str , a_ : Optional[Any] , a_ : Dict , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : List[Any] , ) -> str:
'''simple docstring'''
a__ : Union[str, Any] = DecisionTransformerModel(config=a_ )
model.to(a_ )
model.eval()
a__ : int = model(a_ , a_ , a_ , a_ , a_ , a_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
a__ : List[str] = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : List[Any] = config_and_inputs
a__ : int = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__lowerCamelCase : Dict = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__lowerCamelCase : Tuple = False
__lowerCamelCase : List[Any] = False
__lowerCamelCase : str = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Any = False
__lowerCamelCase : Dict = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : Any = False
__lowerCamelCase : Tuple = False
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
a__ : int = DecisionTransformerModelTester(self )
a__ : Optional[int] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
@slow
def UpperCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : List[str] = DecisionTransformerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : str = model_class(a_ )
a__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[Any] = [*signature.parameters.keys()]
a__ : Optional[Any] = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(a_ )] , a_ )
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase ( self : str ) -> str:
'''simple docstring'''
a__ : int = 2 # number of steps of autoregressive prediction we will perform
a__ : Union[str, Any] = 10 # defined by the RL environment, may be normalized
a__ : Union[str, Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
a__ : Optional[Any] = model.to(a_ )
a__ : Tuple = model.config
torch.manual_seed(0 )
a__ : Optional[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=a_ , dtype=torch.floataa ) # env.reset()
a__ : str = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=a_ )
a__ : List[Any] = torch.tensor(a_ , device=a_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
a__ : Tuple = state
a__ : Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=a_ , dtype=torch.floataa )
a__ : Optional[Any] = torch.zeros(1 , 0 , device=a_ , dtype=torch.floataa )
a__ : List[str] = torch.tensor(0 , device=a_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(a_ ):
a__ : Tuple = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=a_ )] , dim=1 )
a__ : List[str] = torch.cat([rewards, torch.zeros(1 , 1 , device=a_ )] , dim=1 )
a__ : Any = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
a__ , a__ , a__ : List[Any] = model(
states=a_ , actions=a_ , rewards=a_ , returns_to_go=a_ , timesteps=a_ , attention_mask=a_ , return_dict=a_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
a__ , a__ , a__ , a__ : List[str] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=a_ , dtype=torch.floataa ),
1.0,
False,
{},
)
a__ : List[Any] = action_pred[0, -1]
a__ : Any = torch.cat([states, state] , dim=1 )
a__ : Dict = returns_to_go[0, -1] - reward
a__ : int = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
a__ : int = torch.cat(
[timesteps, torch.ones((1, 1) , device=a_ , dtype=torch.long ) * (step + 1)] , dim=1 )
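# Shape bookkeeping for the loop above (illustrative): at step t the model sees
# states of shape (1, t+1, state_dim) and actions of shape (1, t+1, act_dim),
# where the last action row is zero-padded as the slot being predicted;
# action_pred[0, -1] then fills that slot before the next environment step.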
| 251
| 0
|
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # replace possible -100 label sentinels with the pad token id
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
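# Illustrative trace of shift_tokens_right (values are made up):
# input_ids = [[5, -100, 7]], pad_token_id = 0, decoder_start_token_id = 2
#   after the right shift:     [[0, 5, -100]]
#   after setting position 0:  [[2, 5, -100]]
#   after replacing -100:      [[2, 5, 0]]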
| 85
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '<pad>'
SCREAMING_SNAKE_CASE__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(a_ ) , 1004 )
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE__ : str = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(a_ , add_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def __lowercase( self : List[str] )-> Dict:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE__ : str = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
| 85
| 1
|
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in `sentence` that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 480
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 480
| 1
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase_ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
lowerCAmelCase_ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
lowerCAmelCase_ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="auto" , __UpperCAmelCase=-1 , __UpperCAmelCase=0.9 , __UpperCAmelCase=5 , __UpperCAmelCase=500 , __UpperCAmelCase="gpt2-large" , __UpperCAmelCase=-1 , __UpperCAmelCase=1024 , __UpperCAmelCase=25 , __UpperCAmelCase=5 , __UpperCAmelCase=True , __UpperCAmelCase=25 , ):
lowerCAmelCase__ : Tuple = compute_mauve(
p_text=__UpperCAmelCase , q_text=__UpperCAmelCase , p_features=__UpperCAmelCase , q_features=__UpperCAmelCase , p_tokens=__UpperCAmelCase , q_tokens=__UpperCAmelCase , num_buckets=__UpperCAmelCase , pca_max_data=__UpperCAmelCase , kmeans_explained_var=__UpperCAmelCase , kmeans_num_redo=__UpperCAmelCase , kmeans_max_iter=__UpperCAmelCase , featurize_model_name=__UpperCAmelCase , device_id=__UpperCAmelCase , max_text_length=__UpperCAmelCase , divergence_curve_discretization_size=__UpperCAmelCase , mauve_scaling_factor=__UpperCAmelCase , verbose=__UpperCAmelCase , seed=__UpperCAmelCase , )
return out
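# Hedged usage sketch (not part of the original metric file): computing MAUVE on two
# tiny text lists with a small feature model. The model name and device id below are
# illustrative choices, not values mandated by the wrapper above.
#
#     import datasets
#     mauve = datasets.load_metric("mauve")
#     out = mauve.compute(
#         predictions=["hello there", "general kenobi"],
#         references=["hello there", "general kenobi"],
#         featurize_model_name="gpt2",  # smaller than the 'gpt2-large' default
#         device_id=-1,                 # assumption: CPU featurization
#     )
#     print(out.mauve, out.frontier_integral)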
| 678
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self ):
        text_generator = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator('''This is a test''' , do_sample=False )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
        outputs = text_generator(['''This is a test''', '''This is a second test'''] )
        self.assertEqual(
            outputs , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
    def test_small_model_tf(self ):
        text_generator = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator('''This is a test''' , do_sample=False )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
        outputs = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=False )
        self.assertEqual(
            outputs , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
    def get_test_pipeline(self , model , tokenizer , processor ):
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self ):
        prompt = '''Hello I believe in'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        outputs = text_generator(prompt )
        self.assertEqual(
            outputs , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )

        outputs = text_generator(prompt , stop_sequence=''' fe''' )
        self.assertEqual(outputs , [{'''generated_text''': '''Hello I believe in fe'''}] )
    def run_pipeline_test(self , text_generator , _ ):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator('''This is a test''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )

        outputs = text_generator('''This is a test''' , return_full_text=False )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )

        text_generator = pipeline(task='''text-generation''' , model=model , tokenizer=tokenizer , return_full_text=False )
        outputs = text_generator('''This is a test''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )

        outputs = text_generator('''This is a test''' , return_full_text=True )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        outputs = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=True )
            self.assertEqual(
                outputs , [
                    [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                    [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                ] , )
        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_full_text=True , return_text=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_full_text=True , return_tensors=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_text=True , return_tensors=True )
        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator('''''' )
            self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                outputs = text_generator('''''' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
            outputs = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )

            # Hole strategy cannot work
            with self.assertRaises(ValueError ):
                text_generator(
                    '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self ):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        out = pipe('''This is a test''' )
        self.assertEqual(
            out , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        out = pipe('''This is a test''' )
        self.assertEqual(
            out , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
        out = pipe('''This is a test''' )
        self.assertEqual(
            out , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self ):
        import torch

        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 )
        pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self ):
        import torch

        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 )
        pipe('''This is a test''' , do_sample=True , top_p=0.5 )
    def test_pipeline_length_setting_warning(self ):
        prompt = '''Hello world'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            logger = logging.get_logger('''transformers.generation.utils''' )
        logger_msg = '''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )

        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )

        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 )
        self.assertNotIn(logger_msg , cl.out )
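# Hedged illustration (not part of the original test file): the warning exercised above
# fires when both length controls are supplied to a `generate`-backed pipeline, e.g.
#
#     generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#     generator("Hello world", max_length=10, max_new_tokens=1)  # logs "Both `max_new_tokens` ..."
#     generator("Hello world", max_new_tokens=1)                 # no warning
#
# The model name reuses the tiny test checkpoint referenced in the tests above.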
| 678
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
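# Illustrative CLI invocation (paths are placeholders and the script filename is an
# assumption, not part of this file):
# python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin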
| 686
|
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''

DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f'{script_name}.py'
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
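# Hedged usage sketch (an assumption about how a test would consume the fixture above;
# not part of the original conftest):
#
#     def test_dummy_dataset(dataset_loading_script_dir):
#         import datasets
#         ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
#         assert "tokens" in ds.column_names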
| 686
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_A : Any = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k ):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )

    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''' , '''.self_attn''' )
        k = k.replace('''norm1''' , '''self_attn_layer_norm''' )
        k = k.replace('''norm2''' , '''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''' , '''self_attn_layer_norm''' )
        k = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''' , '''final_layer_norm''' )
    return k
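# Quick sanity check of the renaming rules above (illustrative, not in the original script):
#     rename_state_dict_key("encoder.attention.q_lin.weight")
#     # 'attention' -> 'attn', 'q_lin' -> 'q_proj', then '.attn' -> '.self_attn'
#     # => "encoder.self_attn.q_proj.weight"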
def rename_layernorm_keys(sd ):
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('''layernorm_embedding''' , '''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 315
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name ):
    config = SwinConfig(image_size=192 )

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name ):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )

    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''

    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict(orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']

    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()

    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )

    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )

    with torch.no_grad():
        outputs = model(**inputs ).logits

    print(outputs.keys() )
    print('''Looks ok!''' )

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub" )
        model.push_to_hub(f"microsoft/{model_name}" )
        image_processor.push_to_hub(f"microsoft/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
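# Illustrative CLI invocation (checkpoint path is a placeholder and the script filename
# is an assumption):
# python convert_swin_simmim_to_pytorch.py \
#     --model_name swin-base-simmim-window6-192 \
#     --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#     --pytorch_dump_folder_path ./swin-base-simmim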
| 315
| 1
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream ):
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_images(f ):
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , '''Please use tf.one_hot on tensors.''' )
def _dense_to_one_hot(labels_dense , num_classes ):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_labels(f , one_hot=False , num_classes=10 ):
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet:
    """simple docstring"""

    @deprecated(
        None , '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seed1, seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self ):
        return self._images

    @property
    def labels(self ):
        return self._labels

    @property
    def num_examples(self ):
        return self._num_examples

    @property
    def epochs_completed(self ):
        return self._epochs_completed
    def next_batch(self , batch_size , fake_data=False , shuffle=True ):
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , '''Please write your own downloading logic.''' )
def _maybe_download(filename , work_directory , source_url ):
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('''Successfully downloaded''' , filename , size , '''bytes.''' )
    return filepath
@deprecated(
    None , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def read_data_sets(train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    if fake_data:

        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = '''train-images-idx3-ubyte.gz'''
    train_labels_file = '''train-labels-idx1-ubyte.gz'''
    test_images_file = '''t10k-images-idx3-ubyte.gz'''
    test_labels_file = '''t10k-labels-idx1-ubyte.gz'''

    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_images = _extract_images(f )

    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )

    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_images = _extract_images(f )

    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )

    if not 0 <= validation_size <= len(train_images ):
        msg = (
            '''Validation size should be between 0 and '''
            f'{len(train_images )}. Received: {validation_size}.'
        )
        raise ValueError(msg )

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}

    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )

    return _Datasets(train=train , validation=validation , test=test )
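# Hedged usage sketch (the directory is a placeholder; this call is not in the original module):
#     mnist = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
#     batch_xs, batch_ys = mnist.train.next_batch(100)
#     print(batch_xs.shape, batch_ys.shape)  # (100, 784) (100, 10)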
| 423
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Optional[int] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''segformer'''
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                '''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
                ''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , FutureWarning , )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('''reshape_last_stage''' , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self ) -> int:
        return 12
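# Hedged usage sketch (not part of the original module): instantiating the config and
# inspecting the ONNX input axes declared above.
#     config = SegformerConfig()
#     onnx_config = SegformerOnnxConfig(config)
#     print(list(onnx_config.inputs))  # ['pixel_values']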
| 423
| 1
|
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser ):
    group = parser.add_argument_group("""quant_trainer arguments""" )
    group.add_argument("""--wprec""" , type=int , default=8 , help="""weight precision""" )
    group.add_argument("""--aprec""" , type=int , default=8 , help="""activation precision""" )
    group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
    group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
    group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
    group.add_argument("""--quant-disable-keyword""" , type=str , nargs="""+""" , help="""disable quantizers by keyword""" )
    group.add_argument("""--quant-disable-layer-module""" , type=str , help="""disable quantizers by keyword under layer.""" )
    group.add_argument("""--quant-enable-layer-module""" , type=str , help="""enable quantizers by keyword under layer""" )
    group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
    group.add_argument("""--percentile""" , default=None , type=float , help="""percentile for PercentileCalibrator""" )
    group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
    group.add_argument("""--clip-gelu""" , metavar="""N""" , type=float , help="""clip gelu output maximum value to N""" )
    group.add_argument(
        """--recalibrate-weights""" , action="""store_true""" , help=(
            """recalibrate weight amaxes by taking the max of the weights."""
            """ amaxes will be computed with the current quantization granularity (axis)."""
        ) , )
def set_default_quantizers(args ):
    if args.calibrator == "max":
        calib_method = """max"""
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("""Specify --percentile when using percentile calibrator""" )
        calib_method = """histogram"""
    elif args.calibrator == "mse":
        calib_method = """histogram"""
    else:
        raise ValueError(F'''Invalid calibrator {args.calibrator}''' )

    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model(model , args , calib=False , eval=False ):
    logger.info("""Configuring Model for Quantization""" )
    logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["""embeddings"""] , which="""weight""" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""""""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model )
def enable_calibration(model ):
    logger.info("""Enabling Calibration""" )
    for name, module in model.named_modules():
        if name.endswith("""_quantizer""" ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(F'''{name:80}: {module}''' )
def finish_calibration(model , args ):
    logger.info("""Loading calibrated amax""" )
    for name, module in model.named_modules():
        if name.endswith("""_quantizer""" ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("""percentile""" , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def fuse_qkv(model , args ):
    def fuse3(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , """_amax""" ):
                print("""    WARNING: NO AMAX BUFFER""" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F'''    q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )

    for name, mod in model.named_modules():
        if name.endswith(""".attention.self""" ):
            logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
            fuse3(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model , maxval ):
    for name, mod in model.named_modules():
        if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax(model ):
    for name, mod in model.named_modules():
        if hasattr(mod , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model ):
    for name, mod in model.named_modules():
        if hasattr(mod , """_weight_quantizer""" ):
            if not hasattr(mod._weight_quantizer , """_amax""" ):
                print(F"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary(model , name_width=25 , line_width=180 , ignore=None ):
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , """weight""" ):
            continue
        name_width = max(name_width , len(name ) )

    for name, mod in model.named_modules():
        input_q = getattr(mod , """_input_quantizer""" , None )
        weight_q = getattr(mod , """_weight_quantizer""" , None )
        if not hasattr(mod , """weight""" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F'''Act:{input_q.extra_repr()}'''
        wgt_str = F'''Wgt:{weight_q.extra_repr()}'''
        s = F'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F'''{name:{name_width}} {act_str}''' )
            logger.info(F'''{" ":{name_width}} {wgt_str}''' )
def print_quant_summary(model ):
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(F'''{name:80} {mod}''' )
            count += 1
    print(F'''{count} TensorQuantizers found in model''' )
def set_quantizer(name , mod , quantizer , k , v ):
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F'''{name} has no {quantizer}''' )
def set_quantizers(name , mod , which="both" , **kwargs ):
    s = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += F''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name , mod , """_input_quantizer""" , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , """_weight_quantizer""" , k , v )
    logger.info(s )
def set_quantizer_by_name(model , names , **kwargs ):
    for name, mod in model.named_modules():
        if hasattr(mod , """_input_quantizer""" ) or hasattr(mod , """_weight_quantizer""" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("""_quantizer""" ):
            for n in names:
                if re.search(n , name ):
                    s = F'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += F''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
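# Hedged wiring sketch (not part of the original module; the function names follow the
# definitions above, argument values are illustrative):
#     import argparse
#     parser = argparse.ArgumentParser()
#     add_arguments(parser)
#     args = parser.parse_args(["--calibrator", "max", "--quant-per-tensor"])
#     set_default_quantizers(args)      # must run before the quantized model is built
#     ...build model...
#     configure_model(model, args, calib=True)
#     enable_calibration(model)
#     ...run a few calibration batches...
#     finish_calibration(model, args)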
| 363
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'sew'

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect."""
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self ):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
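# Illustrative check of the property above (not in the original file): with the default
# conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product is 5 * 2**6 = 320,
# i.e. roughly 320 input samples feed each output logit frame.
#     config = SEWConfig()
#     assert config.inputs_to_logits_ratio == 320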
| 363
| 1
|
"""simple docstring"""
from manim import *
class _lowerCAmelCase(Scene ):
    def construct(self ):
"""simple docstring"""
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowercase = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowercase = Text('''CPU''' , font_size=24 )
lowercase = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
lowercase = [mem.copy() for i in range(1 )]
lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowercase = Text('''GPU''' , font_size=24 )
lowercase = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
gpu.align_to(UpperCAmelCase__ , UpperCAmelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCAmelCase__ )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
lowercase = Text('''Model''' , font_size=24 )
lowercase = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) , )
lowercase = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__ , run_time=2.5 ) , Write(UpperCAmelCase__ ) , Write(UpperCAmelCase__ ) )
self.add(UpperCAmelCase__ )
lowercase = []
lowercase = []
lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 )
cpu_target.move_to(UpperCAmelCase__ )
cpu_target.generate_target()
lowercase = 0.46 / 4
lowercase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase__ , buff=0.0 )
cpu_targs.append(UpperCAmelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase__ ) )
second_animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(*UpperCAmelCase__ )
self.wait()
| 721
|
"""simple docstring"""
def solution(numerator: int = 1 , digit: int = 1000 ) -> int:
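    # Descriptive note (added): for each candidate denominator, simulate long division of
    # numerator/denominator and record the remainders seen; once a remainder repeats, the
    # count of recorded remainders tracks the recurring-cycle length, and the denominator
    # with the longest cycle is kept (Project Euler problem 26 style).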
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 396
| 0
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
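    # Note (added): `pred` is in the MinMax-scaled space; recovering original-scale values
    # would require the fitted scaler's inverse_transform, which the original script does
    # not apply.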
| 3
|
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
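# A hedged usage sketch: JIT-compiling the kernels requires a CUDA toolchain,
# so callers typically guard the build and fall back to a native PyTorch path.
try:
    MultiScaleDeformableAttention = load_cuda_kernels()
except Exception:
    MultiScaleDeformableAttention = None  # fall back to the pure-PyTorch implementation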
| 103
| 0
|
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCAmelCase = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCAmelCase = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
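if __name__ == "__main__":
    # Usage sketch (assumes torch is installed): decode a random batch of
    # diffusion-style tensors in [-1, 1] into a list of PIL images.
    import torch

    sample = torch.rand(2, 3, 64, 64) * 2 - 1
    print(pt_to_pil(sample))  # -> two 64x64 RGB PIL.Image objects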
| 719
|
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """A generic heap with O(1) lookups; behaves as a max-heap by default, or
    as a min-heap when an inverting key function is supplied."""

    def __init__(self, key: "Callable | None" = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares the two items using the default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """Returns the index of a valid parent, as per the desired ordering,
        among the given index and both of its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top [item, key(value)] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top [item, key(value)] pair and removes it from the heap."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """
    >>> h = Heap()  # max-heap by default
    >>> h.insert_item(5, 34)
    >>> h.insert_item(6, 31)
    >>> h.insert_item(7, 37)
    >>> h.get_top()
    [7, 37]
    >>> h.extract_top()
    [7, 37]
    >>> h.extract_top()
    [5, 34]
    >>> h.extract_top()
    [6, 31]
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
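# A min-heap sketch using the key parameter: scores are stored after applying
# `key`, so the returned [item, key(value)] pair carries the negated value.
#     h = Heap(key=lambda x: -x)
#     h.insert_item(5, 34)
#     h.insert_item(6, 31)
#     h.get_top()  # [6, -31]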
| 16
| 0
|
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 355
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
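# Typical invocation sketch once the `accelerate` CLI entry point is installed:
#     accelerate config --config_file ./my_config.yaml
# which walks through the same prompts as calling main() above directly.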
| 87
| 0
|
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __A ):
__a = parent
def snake_case_ ( self ):
return {}
def a ():
__a = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
__a = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
@require_bsa
class __UpperCAmelCase ( __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MarkupLMFeatureExtractor if is_bsa_available() else None
def snake_case_ ( self ):
__a = MarkupLMFeatureExtractionTester(self )
@property
def snake_case_ ( self ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def snake_case_ ( self ):
# Initialize feature_extractor
__a = self.feature_extraction_class()
# Test not batched input
__a = get_html_strings()[0]
__a = feature_extractor(__A )
# fmt: off
__a = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
__a = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , __A )
self.assertEqual(encoding.xpaths , __A )
# Test batched
__a = get_html_strings()
__a = feature_extractor(__A )
# fmt: off
__a = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
__a = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , __A )
self.assertEqual(encoding.xpaths , __A )
| 713
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def a (lowerCAmelCase__ ):
__a = botoa.client("""iam""" )
__a = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowerCAmelCase__ , AssumeRolePolicyDocument=json.dumps(lowerCAmelCase__ , indent=2 ) )
__a = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowerCAmelCase__ , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(lowerCAmelCase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def a (lowerCAmelCase__ ):
__a = botoa.client("""iam""" )
return iam_client.get_role(RoleName=lowerCAmelCase__ )["Role"]["Arn"]
def a ():
__a = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , lowerCAmelCase__ , )
__a = None
if credentials_configuration == 0:
__a = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__a = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__a = _ask_field("""AWS Access Key ID: """ )
__a = aws_access_key_id
__a = _ask_field("""AWS Secret Access Key: """ )
__a = aws_secret_access_key
__a = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__a = aws_region
__a = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , lowerCAmelCase__ , )
if role_management == 0:
__a = _ask_field("""Enter your IAM role name: """ )
else:
__a = """accelerate_sagemaker_execution_role"""
print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(lowerCAmelCase__ )
__a = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = None
if is_custom_docker_image:
__a = _ask_field("""Enter your Docker image: """ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() )
__a = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = None
if is_sagemaker_inputs_enabled:
__a = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , )
__a = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = None
if is_sagemaker_metrics_enabled:
__a = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , )
__a = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__a = {}
__a = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__a = """dynamo_"""
__a = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__a = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__a = _ask_options(
"""Which mode do you want to use?""" , lowerCAmelCase__ , lambda lowerCAmelCase__ : TORCH_DYNAMO_MODES[int(lowerCAmelCase__ )] , default="""default""" , )
__a = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=lowerCAmelCase__ , error_message="""Please enter yes or no.""" , )
__a = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__a = _ask_options(
lowerCAmelCase__ , lowerCAmelCase__ , lambda lowerCAmelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowerCAmelCase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__a = _ask_field(lowerCAmelCase__ , lambda lowerCAmelCase__ : str(lowerCAmelCase__ ).lower() , default="""ml.p3.2xlarge""" )
__a = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__a = _ask_field(
"""How many machines do you want use? [1]: """ , lowerCAmelCase__ , default=1 , )
__a = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=lowerCAmelCase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowerCAmelCase__ , use_cpu=lowerCAmelCase__ , dynamo_config=lowerCAmelCase__ , eca_instance_type=lowerCAmelCase__ , profile=lowerCAmelCase__ , region=lowerCAmelCase__ , iam_role_name=lowerCAmelCase__ , mixed_precision=lowerCAmelCase__ , num_machines=lowerCAmelCase__ , sagemaker_inputs_file=lowerCAmelCase__ , sagemaker_metrics_file=lowerCAmelCase__ , )
| 209
| 0
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__A = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Dict:
"""simple docstring"""
for attribute in key.split("." ):
lowerCamelCase__: Dict =getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
lowerCamelCase__: str =getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
lowerCamelCase__: Union[str, Any] =hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase__: Optional[Any] =value
elif weight_type == "weight_g":
lowerCamelCase__: int =value
elif weight_type == "weight_v":
lowerCamelCase__: Optional[int] =value
elif weight_type == "bias":
lowerCamelCase__: Dict =value
else:
lowerCamelCase__: Any =value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =[]
lowerCamelCase__: Dict =fairseq_model.state_dict()
lowerCamelCase__: int =hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCamelCase__: Tuple =None
for name, value in fairseq_dict.items():
lowerCamelCase__: Any =False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__: Tuple =True
elif name.split("." )[0] == "proj":
lowerCamelCase__: Tuple =fairseq_model.proj
lowerCamelCase__: Optional[int] =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__: Optional[int] =True
if "*" in mapped_key:
lowerCamelCase__: str =name.split(__lowerCamelCase )[0].split("." )[-2]
lowerCamelCase__: Optional[int] =mapped_key.replace("*" , __lowerCamelCase )
if "weight_g" in name:
lowerCamelCase__: Dict ='''weight_g'''
elif "weight_v" in name:
lowerCamelCase__: Optional[Any] ='''weight_v'''
elif "bias" in name:
lowerCamelCase__: Tuple ='''bias'''
elif "weight" in name:
lowerCamelCase__: Optional[int] ='''weight'''
else:
lowerCamelCase__: Optional[Any] =None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Any =full_name.split("conv_layers." )[-1]
lowerCamelCase__: Tuple =name.split("." )
lowerCamelCase__: List[str] =int(items[0] )
lowerCamelCase__: Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__: Any =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__: Optional[Any] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__: Any =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
def make_linear_from_emb(emb):
    """Create a tied, bias-free linear output layer from an embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Build a tokenizer vocab from a fairseq dict file, mimicking its special-token layout."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
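# Sketch of the fairseq dict format consumed by create_vocab_dict: one
# "<token> <count>" pair per line. For example, a file containing
#     ▁hello 120
#     ▁world 98
# maps to {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "▁hello": 4, "▁world": 5}.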
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a , ) -> Dict:
"""simple docstring"""
lowerCamelCase__: int =WavaVecaConfig.from_pretrained(__lowerCamelCase )
lowerCamelCase__: int =SpeechaTextaConfig.from_pretrained(
__lowerCamelCase , vocab_size=__lowerCamelCase , decoder_layers=__lowerCamelCase , do_stable_layer_norm=__lowerCamelCase )
lowerCamelCase__: Dict =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
lowerCamelCase__: Dict =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
lowerCamelCase__: Union[str, Any] =model[0].eval()
# set weights for wav2vec2 encoder
lowerCamelCase__: str =WavaVecaModel(__lowerCamelCase )
lowerCamelCase__: List[Any] =recursively_load_weights_wavaveca(model.encoder , __lowerCamelCase )
lowerCamelCase__: Union[str, Any] =SpeechaTextaForCausalLM(__lowerCamelCase )
lowerCamelCase__: Optional[int] =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__lowerCamelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
lowerCamelCase__: Union[str, Any] =nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
lowerCamelCase__: List[str] =SpeechEncoderDecoderModel(encoder=__lowerCamelCase , decoder=__lowerCamelCase )
lowerCamelCase__: List[str] =False
# add projection layer
lowerCamelCase__: Tuple =nn.Parameter(projection_layer.weight )
lowerCamelCase__: Optional[int] =nn.Parameter(projection_layer.bias )
lowerCamelCase__: Dict =create_vocab_dict(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "vocab.json" ) , "w" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__: Optional[Any] =SpeechaTextaTokenizer(os.path.join(__lowerCamelCase , "vocab.json" ) )
tokenizer.save_pretrained(__lowerCamelCase )
lowerCamelCase__: int =hf_wavavec.config.to_dict()
lowerCamelCase__: List[Any] =tokenizer.pad_token_id
lowerCamelCase__: int =tokenizer.bos_token_id
lowerCamelCase__: List[str] =tokenizer.eos_token_id
lowerCamelCase__: Dict ='''speech_to_text_2'''
lowerCamelCase__: Optional[int] ='''wav2vec2'''
lowerCamelCase__: Any =SpeechEncoderDecoderConfig.from_dict(__lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
feature_extractor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__A = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
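# Example invocation sketch (the script filename and all paths below are
# placeholders, not taken from the original sources):
#     python convert_wav2vec2_seq2seq_checkpoint.py \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --pytorch_dump_folder_path ./converted_model \
#         --dict_path /path/to/dict.txt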
| 59
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __A :
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( *_snake_case : Any ,**_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
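# Usage sketch: hashimage fingerprints identical pixel data deterministically,
# which is what flaky-output assertions in the tests below can rely on.
#     img = Image.new("RGB", (4, 4), "red")
#     hashimage(img)  # same hex digest on every run for the same image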
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCAmelCase ( self : str ,_snake_case : Union[str, Any] ,_snake_case : Union[str, Any] ,_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ : List[str] = DepthEstimationPipeline(model=_snake_case ,image_processor=_snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase ( self : str ,_snake_case : Optional[Any] ,_snake_case : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ : int = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,_snake_case )
import datasets
lowercase__ : str = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
lowercase__ : Union[str, Any] = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] ,_snake_case ,)
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@slow
@require_torch
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowercase__ : int = '''Intel/dpt-large'''
lowercase__ : Tuple = pipeline('''depth-estimation''' ,model=_snake_case )
lowercase__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
lowercase__ : Dict = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.662 )
@require_torch
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 560
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
def __init__( self : str , snake_case_ : Union[str, Any] , snake_case_ : List[Any]=1_3 , snake_case_ : int=3_2 , snake_case_ : Union[str, Any]=3 , snake_case_ : Optional[Any]=4 , snake_case_ : List[str]=[1_0, 2_0, 3_0, 4_0] , snake_case_ : Union[str, Any]=[2, 2, 3, 2] , snake_case_ : List[Any]=True , snake_case_ : Tuple=True , snake_case_ : Union[str, Any]=3_7 , snake_case_ : str="gelu" , snake_case_ : List[Any]=1_0 , snake_case_ : Optional[int]=0.0_2 , snake_case_ : List[Any]=["stage2", "stage3", "stage4"] , snake_case_ : Optional[int]=[2, 3, 4] , snake_case_ : Optional[Any]=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_stages
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = initializer_range
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
_UpperCAmelCase = scope
def lowercase ( self : List[str] ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowercase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Tuple ):
_UpperCAmelCase = ConvNextVaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
_UpperCAmelCase = model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowercase ( self : Dict , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Tuple ):
_UpperCAmelCase = ConvNextVaForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_UpperCAmelCase = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Dict , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Union[str, Any] ):
_UpperCAmelCase = ConvNextVaBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
_UpperCAmelCase = model(lowercase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCAmelCase = None
_UpperCAmelCase = ConvNextVaBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
_UpperCAmelCase = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
def lowercase ( self : Dict ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
_lowerCamelCase : Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_lowerCamelCase : List[Any] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : int = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Dict = False
_lowerCamelCase : Dict = False
def lowercase ( self : Dict ):
_UpperCAmelCase = ConvNextVaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7 )
def lowercase ( self : str ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : int ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def lowercase ( self : List[Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def lowercase ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def lowercase ( self : str ):
pass
def lowercase ( self : List[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCAmelCase = True
if model_class.__name__ in [
*get_values(lowercase_ ),
*get_values(lowercase_ ),
]:
continue
_UpperCAmelCase = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
_UpperCAmelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
_UpperCAmelCase = model(**lowercase_ ).loss
loss.backward()
def lowercase ( self : Tuple ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCAmelCase = False
_UpperCAmelCase = True
if (
model_class.__name__
in [*get_values(lowercase_ ), *get_values(lowercase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_UpperCAmelCase = model_class(lowercase_ )
model.to(lowercase_ )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
_UpperCAmelCase = model(**lowercase_ ).loss
loss.backward()
def lowercase ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(lowercase_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def lowercase ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def lowercase ( self : Optional[int] ):
def check_hidden_states_output(snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Union[str, Any] ):
_UpperCAmelCase = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def lowercase ( self : Optional[Any] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = ConvNextVaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def lowercase ( self : int ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def lowercase ( self : List[Any] ):
_UpperCAmelCase = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(lowercase_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = preprocessor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**lowercase_ )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase_ )
_UpperCAmelCase = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
| 704
|
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the value at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Bubble `val` up from `index` after a decrease-key operation."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's algorithm: returns the edges of a minimum spanning tree."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__SCREAMING_SNAKE_CASE :Optional[int] = int(input('''Enter number of edges: ''').strip())
__SCREAMING_SNAKE_CASE :Optional[int] = defaultdict(list)
for _ in range(edges_number):
__SCREAMING_SNAKE_CASE :Dict = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
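# Worked example sketch: entering 3 edges as "0 1 5", "1 2 2" and "0 2 4"
# builds {0: [[1, 5], [2, 4]], 1: [[0, 5], [2, 2]], 2: [[1, 2], [0, 4]]},
# and prisms_algorithm returns the MST edges [(0, 2), (2, 1)] (total weight 6).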
| 119
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Any = """▁"""
A__ : str = {"""vocab_file""": """sentencepiece.bpe.model"""}
A__ : int = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
A__ : Any = {
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
A__ : Union[str, Any] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowercase ( __UpperCamelCase ):
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = ["""input_ids""", """attention_mask"""]
__a = []
__a = []
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
lowerCAmelCase__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ : List[str] = legacy_behaviour
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Optional[int] = len(self.sp_model )
lowerCAmelCase__ : Any = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE__ )
}
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ : List[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowerCAmelCase__ : Any = src_lang if src_lang is not None else '''eng_Latn'''
lowerCAmelCase__ : List[str] = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase__ : List[Any] = {}
lowerCAmelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "eng_Latn" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "fra_Latn" , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = src_lang
lowerCAmelCase__ : Any = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def lowercase_ ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase__ : str = [self.cur_lang_code]
lowerCAmelCase__ : List[str] = [self.eos_token_id]
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : str = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase__ : Tuple = [self.cur_lang_code]
lowerCAmelCase__ : List[Any] = [self.eos_token_id]
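# A minimal usage sketch for the NLLB-style tokenizer above (NllbTokenizer in
# upstream transformers). This is an illustrative addition, not part of the
# original file; it assumes a local SentencePiece model file.
#
# tokenizer = NllbTokenizer(vocab_file="sentencepiece.bpe.model", src_lang="eng_Latn", tgt_lang="fra_Latn")
# ids = tokenizer("Hello world").input_ids   # prefix/suffix placement depends on legacy_behaviour
# text = tokenizer.decode(ids, skip_special_tokens=True)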
| 233
|
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZZ")
    702
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 233
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
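# Illustrative sketch (an addition, not part of the original script): how the
# "*" wildcard in MAPPING is resolved to a concrete layer index. The key names
# below are made up for the example.
def resolve_wildcard_demo():
    name = "encoder.layers.3.self_attn.k_proj.weight"  # hypothetical fairseq key
    key = "self_attn.k_proj"
    mapped_key = "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "encoder.layers.3.attention.k_proj"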
| 701
|
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm using a linear scan to pick the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap as the priority queue."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
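# Illustrative usage (an addition, not in the original file): build a small
# graph from an edge list and run both variants.
#
# vertices = [Vertex(i) for i in range(1, 5)]
# for a, b, w in [(1, 2, 3), (1, 3, 1), (2, 3, 7), (3, 4, 2)]:
#     connect(vertices, a, b, w)
# print(prim(vertices, vertices[0]))             # [(2, 1), (3, 1), (4, 3)]
# print(list(prim_heap(vertices, vertices[0])))  # same MST, yielded lazily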
| 23
| 0
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__UpperCamelCase = "<<<<<<< This should probably be modified because it mentions: "
__UpperCamelCase = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value(\'\1\')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value(\'string\')"),
(R"tfds\.features\.Text\(", R"datasets.Value(\'string\'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def convert_command_factory(args: Namespace):
    """Factory function used to create the convert command from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")

            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
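# Illustrative sketch (an addition): applying the TO_CONVERT substitutions to a
# single line, which is the core technique of the converter above.
#
# line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})\n"
# for pattern, replacement in TO_CONVERT:
#     line = re.sub(pattern, replacement, line)
# # -> "features=datasets.Features({'text': datasets.Value('string')})\n"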
| 26
|
def join(separator: str, separated: list[str]) -> str:
    """
    Joins a list of strings using a separator, mirroring str.join.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
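# Edge-case note (an addition, not in the original file): because the
# implementation strips the separator from both ends of the result, items that
# themselves start or end with the separator lose those characters, unlike
# str.join:
#
# join("#", ["#a", "b#"])  # -> 'a#b', whereas "#".join(...) -> '#a#b#'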
| 592
| 0
|
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
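# Illustrative sketch (an addition, not part of the test file): the KV-cache
# equivalence the decoder test above checks, written as standalone logic.
# `model`, `input_ids`, and `next_tokens` are assumed to come from any HF
# causal LM that supports use_cache/past_key_values; attention masks are
# omitted for brevity.
#
# out = model(input_ids, use_cache=True)
# cached = out.past_key_values
# # Feeding only the new tokens plus the cache should match a full forward pass:
# full = model(torch.cat([input_ids, next_tokens], dim=-1)).logits[:, -next_tokens.shape[1]:]
# incremental = model(next_tokens, past_key_values=cached).logits
# torch.testing.assert_close(incremental, full, atol=1e-3, rtol=1e-3)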
| 703
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
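# Side note on the version-gating pattern used above (an illustrative addition):
# packaging's version.parse compares versions semantically, not lexicographically,
# which is why it is used instead of a plain string comparison.
#
# from packaging import version
# version.parse("3.7") < version.parse("3.10")   # True
# "3.7" < "3.10"                                 # False (string comparison pitfall)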
| 605
| 0
|