| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
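Each row below interleaves two full Python source files (the `code` and `style_context` cells) with the numeric columns, which appear here as `| code_codestyle: n |` and `| style_context_codestyle: n | label: n |` markers between the cells. A minimal sketch of reading such a dataset with the `datasets` library follows; the repo id `org/code-style-pairs` is a placeholder, not taken from this dump:

```python
from datasets import load_dataset

# Placeholder repo id; substitute the dataset's actual path.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # the raw source text stored in the `code` cell
```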
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| code_codestyle: 65 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_A = vocab_file
_A = monolingual_vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_A = {}
_A = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
_A = cnt
cnt += 1
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
_A = line.strip().split()[0]
_A = len(self.fairseq_tokens_to_ids )
if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
_A = len(self.fairseq_tokens_to_ids )
_A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ) -> List[Any]:
_A = self.__dict__.copy()
_A = None
_A = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
_A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def snake_case_ ( self : Dict ) -> Optional[Any]:
_A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
return self.fairseq_ids_to_tokens[index]
def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
_A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
return out_string
def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__lowerCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| style_context_codestyle: 2 | label: 0 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be multiplied
    together before a single-digit number remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be summed
    before a single-digit number remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
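# Quick sanity checks (added for illustration, not in the original file):
#   multiplicative_persistence(39)   # 39 -> 27 -> 14 -> 4, returns 3
#   additive_persistence(199)        # 199 -> 19 -> 10 -> 1, returns 3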
| code_codestyle: 693 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[str] = None
if token is not None:
lowerCAmelCase : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCAmelCase : Any = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase : int = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = None
if token is not None:
lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = None
if token is not None:
lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = result.headers["""Location"""]
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[int] = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
lowerCAmelCase : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase : str = line[: line.index(""": """ )]
lowerCAmelCase : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCAmelCase : Union[str, Any] = line[len("""FAILED """ ) :]
failed_tests.append(SCREAMING_SNAKE_CASE__ )
elif filename == "job_name.txt":
lowerCAmelCase : Union[str, Any] = line
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` """
F"""and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCAmelCase : Optional[int] = None
if job_name and job_links:
lowerCAmelCase : Optional[int] = job_links.get(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )]
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : Union[str, Any] = [os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__ ,job_links=SCREAMING_SNAKE_CASE__ ) )
return errors
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase : List[str] = counter.most_common()
lowerCAmelCase : Union[str, Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCAmelCase : int = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase : str = test.split("""/""" )[2]
else:
lowerCAmelCase : List[Any] = None
return test
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase : int = [x for x in logs if x[2] is not None]
lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
lowerCAmelCase : Dict = {}
for test in tests:
lowerCAmelCase : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase : Tuple = counter.most_common()
lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
lowerCAmelCase : Any = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """| no. | error | status |"""
lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
lowerCAmelCase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
lowerCAmelCase : List[str] = reduced_by_error[error]["""count"""]
lowerCAmelCase : Any = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
lowerCAmelCase : Any = reduced_by_model[model]["""count"""]
lowerCAmelCase , lowerCAmelCase : Optional[int] = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase : Optional[Any] = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| style_context_codestyle: 693 | label: 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '▁'
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase = None , __lowerCamelCase=None , **__lowerCamelCase , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
_A : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenizer_file=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__lowerCamelCase))
_A : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_A : List[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_A : Optional[int] = 1
_A : Dict = len(self.sp_model)
_A : List[str] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowerCamelCase)
}
_A : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
_A : List[str] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
_A : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_A : int = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
_A : Tuple = src_lang if src_lang is not None else "en_XX"
_A : List[Any] = self.lang_code_to_id[self._src_lang]
_A : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self) -> List[Any]:
_A : Optional[Any] = self.__dict__.copy()
_A : List[str] = None
_A : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __lowerCamelCase) -> Dict:
_A : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_A : Optional[Any] = {}
_A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def _lowerCamelCase ( self) -> Tuple:
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase)
_A : Optional[int] = [1] * len(self.prefix_tokens)
_A : Tuple = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowerCamelCase)) + suffix_ones
return prefix_ones + ([0] * len(__lowerCamelCase)) + ([0] * len(__lowerCamelCase)) + suffix_ones
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Optional[int] = [self.sep_token_id]
_A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : int = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Any = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = tgt_lang_id
return inputs
def _lowerCamelCase ( self) -> Dict:
_A : int = {self.convert_ids_to_tokens(__lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_A : List[str] = self.sp_model.PieceToId(__lowerCamelCase)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
_A : Optional[Any] = "".join(__lowerCamelCase).replace(__lowerCamelCase , " ").strip()
return out_string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(__lowerCamelCase , "wb") as fi:
_A : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase)
return (out_vocab_file,)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Optional[int] = src_lang
_A : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> int:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : str = self.lang_code_to_id[src_lang]
_A : Any = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Any = self.lang_code_to_id[lang]
_A : str = []
_A : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
| code_codestyle: 503 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( a , unittest.TestCase):
'''simple docstring'''
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@property
def _lowerCamelCase ( self) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self) -> List[str]:
_A : Optional[int] = ort.SessionOptions()
_A : Any = False
return options
def _lowerCamelCase ( self) -> str:
_A : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_A : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_A : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase)
_A : Tuple = "A red cat sitting on a park bench"
_A : Dict = np.random.RandomState(0)
_A : int = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , mask_image=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__lowerCamelCase , output_type="np" , )
_A : List[Any] = output.images
_A : Tuple = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
_A : Tuple = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def _lowerCamelCase ( self) -> List[str]:
_A : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_A : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_A : Dict = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx")
_A : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase)
_A : Optional[int] = "A red cat sitting on a park bench"
_A : Union[str, Any] = np.random.RandomState(0)
_A : int = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , mask_image=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__lowerCamelCase , output_type="np" , )
_A : str = output.images
_A : Optional[int] = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
_A : Tuple = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| style_context_codestyle: 503 | label: 1 |
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
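# Example values (added for illustration, not in the original file):
#   simple_interest(1000, 0.001, 30)  # 1000 * 0.001 * 30 = 30.0
#   compound_interest(1000, 0.05, 3)  # 1000 * (1.05 ** 3 - 1) = 157.625
#   apr_interest(1000, 0.0365, 1)     # daily compounding of a 3.65% APR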
| code_codestyle: 108 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( lowercase_ ):
def a ( self ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a ( self ):
snake_case_ = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
return Dataset.from_dict(snake_case )
def a ( self ):
snake_case_ = self._create_example_records()
snake_case_ = Dataset.from_list(snake_case )
self.assertListEqual(dset.column_names , ['col_1', 'col_2'] )
for i, r in enumerate(snake_case ):
self.assertDictEqual(snake_case , example_records[i] )
def a ( self ):
snake_case_ = self._create_example_records()
snake_case_ = Dataset.from_list(snake_case )
snake_case_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a ( self ): # checks what happens with missing columns
snake_case_ = [{'col_1': 1}, {'col_2': 'x'}]
snake_case_ = Dataset.from_list(snake_case )
self.assertDictEqual(dset[0] , {'col_1': 1} )
self.assertDictEqual(dset[1] , {'col_1': None} ) # NB: first record is used for columns
def a ( self ): # checks if the type can be inferred from the second record
snake_case_ = [{'col_1': []}, {'col_1': [1, 2]}]
snake_case_ = Dataset.from_list(snake_case )
self.assertEqual(dset.info.features['col_1'] , Sequence(Value('int64' ) ) )
def a ( self ):
snake_case_ = Dataset.from_list([] )
self.assertEqual(len(snake_case ) , 0 )
self.assertListEqual(dset.column_names , [] )
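# Illustrative summary (not part of the original test file): Dataset.from_list
# infers the schema from the first record, so
#   Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
# keeps only "col_1" and fills the second row with None, as asserted above.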
| style_context_codestyle: 108 | label: 1 |
"""Convert an infix expression to prefix notation (via postfix),
printing each step of the stack conversion in tabular form."""


def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
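# Worked example (added for illustration): infix_2_prefix("(a+b)*c") reverses
# the input to "c*(b+a)", converts that to postfix "cba+*", then reverses the
# result, yielding the prefix form "*+abc".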
| code_codestyle: 527 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=7 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : Union[str, Any]=18 , lowerCAmelCase__ : Tuple=30 , lowerCAmelCase__ : Any=4_00 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE : Any = min_resolution
SCREAMING_SNAKE_CASE : int = max_resolution
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
SCREAMING_SNAKE_CASE : List[Any] = do_thumbnail
SCREAMING_SNAKE_CASE : Union[str, Any] = do_align_axis
SCREAMING_SNAKE_CASE : Tuple = do_pad
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean
SCREAMING_SNAKE_CASE : Optional[int] = image_std
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase_ ( snake_case_ , unittest.TestCase ):
_lowerCAmelCase : Any = DonutImageProcessor if is_vision_available() else None
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = DonutImageProcessingTester(self )
@property
def __lowercase ( self : int ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_thumbnail''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_pad''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
def __lowercase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def __lowercase ( self : int ):
"""simple docstring"""
pass
@is_flaky()
def __lowercase ( self : int ):
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : int = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __lowercase ( self : List[Any] ):
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : List[str] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| style_context_codestyle: 527 | label: 1 |
"""Greedy activity selection: print the maximum number of mutually
compatible activities, assuming they are sorted by finish time."""


def print_max_activities(start, finish):
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
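# For the sample data above, the greedy scan prints: 0,1,3,4,
# since each selected activity starts no earlier than the previous one finishes.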
| code_codestyle: 701 |
"""Zeller's congruence: find the day of the week for a given Gregorian date."""
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return the weekday for a date passed as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
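# Example (added for illustration): zeller("01-31-2010") returns
# "Your date 01-31-2010, is a Sunday!" (31 January 2010 fell on a Sunday).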
| style_context_codestyle: 211 | label: 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :Any = ["image_processor", "tokenizer"]
A :str = "ViTImageProcessor"
A :Dict = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
a__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
a__ : Any = kwargs.pop("feature_extractor" )
a__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
a__ : Tuple = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if visual_prompt is not None:
a__ : Any = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
a__ : List[str] = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if visual_prompt is not None and images is not None:
a__ : List[Any] = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
a__ : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
a__ : Any = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _A ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _A ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _A ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def _A ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
| code_codestyle: 191 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowerCamelCase = logging.get_logger(__name__)
# General docstring
lowerCamelCase = """PoolFormerConfig"""
# Base docstring
lowerCamelCase = """sail/poolformer_s12"""
lowerCamelCase = [1, 5_12, 7, 7]
# Image classification docstring
lowerCamelCase = """sail/poolformer_s12"""
lowerCamelCase = """tabby, tabby cat"""
lowerCamelCase = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase = 0.0 , __UpperCamelCase = False ) -> Dict:
if drop_prob == 0.0 or not training:
return input
a__ : Tuple = 1 - drop_prob
a__ : Any = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
a__ : Dict = keep_prob + torch.rand(__UpperCamelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
a__ : Optional[int] = input.div(__UpperCamelCase ) * random_tensor
return output
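# Note (added for clarity): with drop probability p, each sample in the batch is
# zeroed with probability p and the survivors are scaled by 1 / (1 - p), so the
# output matches the input in expectation. This is "stochastic depth" applied
# per residual branch rather than per individual activation.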
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase = None ):
"""simple docstring"""
super().__init__()
a__ : Optional[Any] = drop_prob
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
return drop_path(__UpperCAmelCase , self.drop_prob , self.training )
def _A ( self ):
"""simple docstring"""
return "p={}".format(self.drop_prob )
class _a ( nn.Module ):
'''simple docstring'''
    def __init__( self , patch_size , stride , padding , num_channels , hidden_size , norm_layer=None ):
        """simple docstring"""
        super().__init__()
        patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
        stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
        padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
        self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
        self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()
    def forward( self , pixel_values ):
        """simple docstring"""
        embeddings = self.projection(pixel_values )
        embeddings = self.norm(embeddings )
        return embeddings
class _a ( nn.GroupNorm ):
'''simple docstring'''
    def __init__( self , num_channels , **kwargs ):
        """simple docstring"""
        super().__init__(1 , num_channels , **kwargs )
class _a ( nn.Module ):
'''simple docstring'''
    def __init__( self , pool_size ):
        """simple docstring"""
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )
    def forward( self , hidden_states ):
        """simple docstring"""
        return self.pool(hidden_states ) - hidden_states
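# Hedged sketch of why the subtraction above works as a "token mixer": the
# encoder layer adds the residual branch back in, which recovers a plain local
# average. Shapes and pool size below are illustrative assumptions:
#
#     pool = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
#     x = torch.randn(1, 64, 7, 7)
#     mixed = pool(x) - x                 # what the forward pass above computes
#     assert torch.allclose(x + mixed, pool(x))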
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
super().__init__()
a__ : str = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
a__ : int = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
a__ : Any = PoolFormerDropPath(__UpperCAmelCase )
if isinstance(config.hidden_act , __UpperCAmelCase ):
a__ : List[Any] = ACTaFN[config.hidden_act]
else:
a__ : Union[str, Any] = config.hidden_act
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Union[str, Any] = self.conva(__UpperCAmelCase )
a__ : Union[str, Any] = self.act_fn(__UpperCAmelCase )
a__ : Dict = self.drop(__UpperCAmelCase )
a__ : Union[str, Any] = self.conva(__UpperCAmelCase )
a__ : int = self.drop(__UpperCAmelCase )
return hidden_states
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
super().__init__()
a__ : Any = PoolFormerPooling(__UpperCAmelCase )
a__ : Any = PoolFormerOutput(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a__ : Any = PoolFormerGroupNorm(__UpperCAmelCase )
a__ : Dict = PoolFormerGroupNorm(__UpperCAmelCase )
# Useful for training neural nets
a__ : List[Any] = PoolFormerDropPath(__UpperCAmelCase ) if drop_path > 0.0 else nn.Identity()
a__ : List[str] = config.use_layer_scale
if config.use_layer_scale:
a__ : str = nn.Parameter(
config.layer_scale_init_value * torch.ones((__UpperCAmelCase) ) , requires_grad=__UpperCAmelCase )
a__ : int = nn.Parameter(
config.layer_scale_init_value * torch.ones((__UpperCAmelCase) ) , requires_grad=__UpperCAmelCase )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
if self.use_layer_scale:
a__ : Any = self.pooling(self.before_norm(__UpperCAmelCase ) )
a__ : int = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
a__ : Optional[int] = hidden_states + self.drop_path(__UpperCAmelCase )
a__ : Dict = ()
a__ : List[Any] = self.output(self.after_norm(__UpperCAmelCase ) )
a__ : Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
a__ : Optional[Any] = hidden_states + self.drop_path(__UpperCAmelCase )
a__ : Optional[int] = (output,) + outputs
return outputs
else:
a__ : Optional[int] = self.drop_path(self.pooling(self.before_norm(__UpperCAmelCase ) ) )
# First residual connection
a__ : Tuple = pooling_output + hidden_states
a__ : Tuple = ()
# Second residual connection inside the PoolFormerOutput block
a__ : Optional[int] = self.drop_path(self.output(self.after_norm(__UpperCAmelCase ) ) )
a__ : str = hidden_states + layer_output
a__ : Optional[Any] = (output,) + outputs
return outputs
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ):
"""simple docstring"""
super().__init__()
a__ : Any = config
# stochastic depth decay rule
a__ : List[str] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
a__ : List[Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
a__ : Any = nn.ModuleList(__UpperCAmelCase )
# Transformer blocks
a__ : Optional[int] = []
a__ : List[Any] = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
a__ : str = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__UpperCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__UpperCAmelCase ) )
a__ : Any = nn.ModuleList(__UpperCAmelCase )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ):
"""simple docstring"""
a__ : int = () if output_hidden_states else None
a__ : str = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
a__ , a__ : Optional[Any] = layers
# Get patch embeddings from hidden_states
a__ : Any = embedding_layer(__UpperCAmelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(__UpperCAmelCase ):
a__ : List[Any] = blk(__UpperCAmelCase )
a__ : Tuple = layer_outputs[0]
if output_hidden_states:
a__ : Optional[Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__UpperCAmelCase , hidden_states=__UpperCAmelCase )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :Optional[int] = PoolFormerConfig
A :List[str] = "poolformer"
A :Tuple = "pixel_values"
A :List[str] = True
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
if isinstance(__UpperCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__UpperCAmelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
"""simple docstring"""
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
a__ : Dict = value
lowerCamelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , SCREAMING_SNAKE_CASE , )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ):
"""simple docstring"""
super().__init__(__UpperCAmelCase )
a__ : Optional[Any] = config
a__ : int = PoolFormerEncoder(__UpperCAmelCase )
# Initialize weights and apply final processing
self.post_init()
def _A ( self ):
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _A ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
"""simple docstring"""
a__ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a__ : str = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
a__ : List[Any] = self.encoder(
__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase , )
a__ : Any = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ):
"""simple docstring"""
super().__init__()
a__ : Any = nn.Linear(config.hidden_size , config.hidden_size )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Any = self.dense(__UpperCAmelCase )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , SCREAMING_SNAKE_CASE , )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ):
"""simple docstring"""
super().__init__(__UpperCAmelCase )
a__ : Optional[Any] = config.num_labels
a__ : int = PoolFormerModel(__UpperCAmelCase )
# Final norm
a__ : Any = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
a__ : Dict = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _A ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
"""simple docstring"""
a__ : str = return_dict if return_dict is not None else self.config.use_return_dict
a__ : str = self.poolformer(
__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase , )
a__ : Optional[Any] = outputs[0]
a__ : Union[str, Any] = self.classifier(self.norm(__UpperCAmelCase ).mean([-2, -1] ) )
a__ : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a__ : str = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a__ : List[str] = "single_label_classification"
else:
a__ : str = "multi_label_classification"
if self.config.problem_type == "regression":
a__ : int = MSELoss()
if self.num_labels == 1:
a__ : str = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a__ : Union[str, Any] = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
a__ : Any = CrossEntropyLoss()
a__ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a__ : Union[str, Any] = BCEWithLogitsLoss()
a__ : Dict = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
if not return_dict:
a__ : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states )
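# Hedged end-to-end usage sketch for the classification head above, following
# the standard transformers inference pattern (the image URL is a placeholder;
# the checkpoint matches the docstring constants at the top of this module):
#
#     from transformers import AutoImageProcessor, PoolFormerForImageClassification
#     from PIL import Image
#     import requests
#
#     image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#     image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     logits = model(**image_processor(images=image, return_tensors="pt")).logits
#     print(model.config.id2label[logits.argmax(-1).item()])   # e.g. "tabby, tabby cat"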
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class a__ ( UpperCAmelCase__ ):
snake_case_ = 42
snake_case_ = 42
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case_ = 1
@register_to_config
def __init__( self, _UpperCAmelCase = 2000, _UpperCAmelCase = 0.15, _UpperCAmelCase = 0.01, _UpperCAmelCase = 1348.0, _UpperCAmelCase = 1E-5, _UpperCAmelCase = 1, ):
'''simple docstring'''
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
return sample
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, _UpperCAmelCase, _UpperCAmelCase, device=_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(_UpperCAmelCase ), math.log(_UpperCAmelCase ), _UpperCAmelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(_UpperCAmelCase, _UpperCAmelCase ).to(sample.device )
lowercase__ = torch.zeros_like(_UpperCAmelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=_UpperCAmelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_UpperCAmelCase, prev_sample_mean=_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=_UpperCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_UpperCAmelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
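# Hedged sketch of the predictor-corrector sampling loop this scheduler is
# meant to drive. The method definitions above are anonymised; upstream they
# are `step_pred` (SDE prediction) and `step_correct` (Langevin correction),
# used below under those names. `model`, the sample shape, and the step counts
# are placeholders, and `scheduler` is assumed to be an instance of the class:
#
#     scheduler.set_timesteps(num_inference_steps=2000)
#     scheduler.set_sigmas(num_inference_steps=2000)
#     sample = torch.randn(1, 3, 256, 256) * scheduler.config.sigma_max
#     for t in scheduler.timesteps:
#         for _ in range(num_correction_steps):
#             model_output = model(sample, t).sample
#             sample = scheduler.step_correct(model_output, sample).prev_sample
#         model_output = model(sample, t).sample
#         sample = scheduler.step_pred(model_output, t, sample).prev_sample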
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
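# Hedged usage sketch: with this file exposed as `hubconf.py` at a repository
# root, the entry points above become loadable through torch.hub. The repo and
# checkpoint names below are assumptions for illustration only:
#
#     import torch
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")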
from __future__ import annotations
from cmath import sqrt
def quadratic_roots( a: complex , b: complex , c: complex ) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F'The solutions are: {solution_1} and {solution_2}' )
if __name__ == "__main__":
main()
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
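# Example invocation (script name and output path are placeholders):
#
#     python convert_unclip_txt2img_to_image_variation.py \
#         --txt2img_unclip kakaobrain/karlo-v1-alpha \
#         --dump_path ./karlo-image-variation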
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve( num: int ) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError(f'''{num}: Invalid input, please enter a positive integer.''' )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
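# Illustrative check: prime_sieve(10) -> [2, 3, 5, 7]. The trailing
# `end + 1 .. num` loop above collects primes larger than sqrt(num), which the
# main while-loop never visits as a `start` value.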
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload: bytes , sampling_rate: int ) -> np.ndarray:
    '''simple docstring'''
    ar = f'''{sampling_rate}'''
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile" )
    return audio
def ffmpeg_microphone( sampling_rate: int , chunk_length_s: float , format_for_conversion: str = "f32le" , ):
    '''simple docstring'''
    ar = f'''{sampling_rate}'''
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live( sampling_rate: int , chunk_length_s: float , stream_chunk_s: Optional[int] = None , stride_length_s: Optional[Union[Tuple[float, float], float]] = None , format_for_conversion: str = "f32le" , ):
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"] , dtype=dtype )
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator , chunk_len: int , stride: Tuple[int, int] , stream: bool = False ):
    '''simple docstring'''
    acc = b""
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream( ffmpeg_command , buflen: int ):
    '''simple docstring'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : int =OpenAIGPTTokenizer
__lowerCamelCase : Optional[Any] =OpenAIGPTTokenizerFast
__lowerCamelCase : Optional[Any] =True
__lowerCamelCase : List[str] =False
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__a = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__a = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__lowercase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__lowercase ) )
def UpperCamelCase_ ( self : List[Any] , __lowercase : Union[str, Any] ):
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
__a = """lower"""
__a = ["""low""", """er</w>"""]
__a = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__a = tokens + ["""<unk>"""]
__a = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
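        # Why [14, 15, 20]: with the toy merges above, "lower" is first split
        # into characters, then "l o" -> "lo", "lo w" -> "low", and
        # "e r</w>" -> "er</w>"; "low" and "er</w>" sit at indices 14 and 15 of
        # the vocab, and the appended "<unk>" maps to index 20.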
def UpperCamelCase_ ( self : Any , __lowercase : Optional[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
# Simple input
__a = """This is a simple input"""
__a = ["""This is a simple input 1""", """This is a simple input 2"""]
__a = ("""This is a simple input""", """This is a pair""")
__a = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(
__lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" , )
# Pair input
self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(
__lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" , )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
pass
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Optional[Any] , __lowercase : Dict , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any]=None ):
'''simple docstring'''
super().__init__(
__lowercase , question_encoder_tokenizer=__lowercase , generator_tokenizer=__lowercase , index=__lowercase , init_retrieval=__lowercase , )
__a = None
def UpperCamelCase_ ( self : List[Any] , __lowercase : int ):
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__a = self._infer_socket_ifname()
# avoid clash with the NCCL port
__a = str(distributed_port + 1 )
__a = dist.new_group(ranks=__lowercase , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase_ ( self : int , __lowercase : List[str] , __lowercase : int , __lowercase : List[str]=torch.floataa ):
'''simple docstring'''
__a = torch.empty(__lowercase , dtype=__lowercase )
dist.scatter(__lowercase , src=0 , scatter_list=__lowercase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__a = next((addr for addr in addrs if addr.startswith("""e""" )) , __lowercase )
return ifname
def UpperCamelCase_ ( self : int , __lowercase : np.ndarray , __lowercase : int ):
'''simple docstring'''
# single GPU training
if not dist.is_initialized():
__a , __a = self._main_retrieve(__lowercase , __lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowercase )
# distributed training
__a = dist.get_world_size(group=self.process_group )
# gather logic
__a = None
if self._is_main():
__a = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowercase )]
dist.gather(torch.tensor(__lowercase ) , dst=0 , gather_list=__lowercase , group=self.process_group )
# scatter logic
__a = question_hidden_states.shape[0]
__a = []
__a = []
if self._is_main():
assert len(__lowercase ) == world_size
__a , __a = self._main_retrieve(torch.cat(__lowercase ).numpy() , __lowercase )
__a , __a = torch.tensor(__lowercase ), torch.tensor(__lowercase )
__a = self._chunk_tensor(__lowercase , __lowercase )
__a = self._chunk_tensor(__lowercase , __lowercase )
__a = self._scattered(__lowercase , [n_queries, n_docs] , target_type=torch.intaa )
__a = self._scattered(__lowercase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowercase )
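# Hedged summary of the distributed flow above: every worker ships its
# question embeddings to the main process (dist.gather), rank 0 runs the index
# lookup once over the concatenated batch via _main_retrieve, and the doc ids
# and doc embeddings are sliced back out per worker (_chunk_tensor followed by
# dist.scatter inside _scattered), so only one copy of the index is queried.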
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg , hint=None ) -> None:
    require_version(deps[pkg] , hint )
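# Hedged usage sketch for the helper above (the hint string is illustrative):
#
#     dep_version_check("tqdm")                              # pin from deps["tqdm"]
#     dep_version_check("numpy", "pip install -U numpy")     # custom hint on failure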
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''pixel_values''']
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PIL.Image.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
__A : List[Any] = size if size is not None else {'height': 256, 'width': 256}
__A : Tuple = get_size_dict(_UpperCAmelCase)
__A : int = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__A : str = get_size_dict(_UpperCAmelCase , param_name='crop_size')
__A : Union[str, Any] = do_resize
__A : Tuple = size
__A : Any = resample
__A : List[Any] = do_center_crop
__A : Tuple = crop_size
__A : Tuple = do_rescale
__A : Optional[int] = rescale_factor
__A : List[str] = do_normalize
__A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PIL.Image.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : List[Any] = get_size_dict(_UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
return resize(
_UpperCAmelCase , size=(size['height'], size['width']) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Any = get_size_dict(_UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
'''simple docstring'''
__A : str = do_resize if do_resize is not None else self.do_resize
__A : int = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__A : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : List[str] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : Optional[Any] = image_std if image_std is not None else self.image_std
__A : Optional[Any] = size if size is not None else self.size
__A : Dict = get_size_dict(_UpperCAmelCase)
__A : int = crop_size if crop_size is not None else self.crop_size
__A : List[str] = get_size_dict(_UpperCAmelCase , param_name='crop_size')
__A : Dict = make_list_of_images(_UpperCAmelCase)
if not valid_images(_UpperCAmelCase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
__A : Dict = [to_numpy_array(_UpperCAmelCase) for image in images]
if do_resize:
__A : Dict = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase) for image in images]
if do_center_crop:
__A : Tuple = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase) for image in images]
if do_rescale:
__A : Optional[Any] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase) for image in images]
if do_normalize:
__A : List[Any] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase) for image in images]
__A : Optional[Any] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase) for image in images]
__A : Any = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase)
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x , bits=BITS ):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device
    x = (x * 2_5_5).int().clamp(0 , 2_5_5 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b c h w -> b c 1 h w""" )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , """b c d h w -> b (c d) h w""" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x , bits=BITS ):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b (c d) h w -> b c d h w""" , d=8 )
    dec = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
    return (dec / 2_5_5).clamp(0.0 , 1.0 )
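# Hedged round-trip sketch for the two helpers above (shapes are illustrative
# assumptions): an image in [0, 1] maps to +/-1 bit-planes, 8 per channel, and
# back losslessly up to the 1/255 quantisation:
#
#     x = torch.rand(2, 3, 16, 16)                  # image in [0, 1)
#     bits = decimal_to_bits(x)                     # (2, 24, 16, 16), values in {-1, +1}
#     x_rec = bits_to_decimal(bits)                 # (2, 3, 16, 16)
#     assert torch.allclose(x_rec, (x * 255).int().float() / 255)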
def _SCREAMING_SNAKE_CASE ( self : Any , __lowercase : str , __lowercase : Dict , __lowercase : Dict , __lowercase : Any = 0.0 , __lowercase : int = True , __lowercase : Union[str, Any]=None , __lowercase : Union[str, Any] = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__A = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__A = self.alphas_cumprod[timestep]
__A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__A = self.bit_scale
if self.config.clip_sample:
__A = torch.clamp(_UpperCamelCase , -scale , _UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__A = self._get_variance(_UpperCamelCase , _UpperCamelCase )
__A = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__A = model_output.device if torch.is_tensor(_UpperCamelCase ) else '''cpu'''
__A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_UpperCamelCase ).to(_UpperCamelCase )
__A = self._get_variance(_UpperCamelCase , _UpperCamelCase ) ** 0.5 * eta * noise
__A = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_UpperCamelCase , pred_original_sample=_UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] , __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Optional[int]="epsilon" , __lowercase : List[str]=None , __lowercase : Optional[Any] = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
__A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__A = torch.split(_UpperCamelCase , sample.shape[1] , dim=1 )
else:
__A = None
# 1. compute alphas, betas
__A = self.alphas_cumprod[t]
__A = self.alphas_cumprod[t - 1] if t > 0 else self.one
__A = 1 - alpha_prod_t
__A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__A = model_output
else:
raise ValueError(f"Unsupported prediction_type {prediction_type}." )
# 3. Clip "predicted x_0"
__A = self.bit_scale
if self.config.clip_sample:
__A = torch.clamp(_UpperCamelCase , -scale , _UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__A = 0
if t > 0:
__A = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_UpperCamelCase ).to(model_output.device )
__A = (self._get_variance(_UpperCamelCase , predicted_variance=_UpperCamelCase ) ** 0.5) * noise
__A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_UpperCamelCase , pred_original_sample=_UpperCamelCase )
class __lowercase ( A__ ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] = 1.0 , ):
"""simple docstring"""
super().__init__()
__A = bit_scale
__A = (
ddim_bit_scheduler_step if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __call__( self : List[Any] , UpperCamelCase_ : str = 256 , UpperCamelCase_ : str = 256 , UpperCamelCase_ : Dict = 50 , UpperCamelCase_ : List[Any] = None , UpperCamelCase_ : Any = 1 , UpperCamelCase_ : Optional[Any] = "pil" , UpperCamelCase_ : List[str] = True , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
__A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=SCREAMING_SNAKE_CASE__ , )
__A = decimal_to_bits(SCREAMING_SNAKE_CASE__ ) * self.bit_scale
__A = latents.to(self.device )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__A = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample
# compute the previous noisy sample x_t -> x_t-1
__A = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
__A = bits_to_decimal(SCREAMING_SNAKE_CASE__ )
if output_type == "pil":
__A = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
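# Hedged usage sketch for the pipeline class above (its name is anonymised
# here; upstream it is the community `BitDiffusion` pipeline, and the
# constructor argument names below follow the __init__ definition). The UNet
# is a placeholder -- any UNet with 8 channels per colour plane fits:
#
#     pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#     image = pipe(height=256, width=256, num_inference_steps=50).images[0]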
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
        '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/esm2_t6_8M_UR50D''': 10_24,
    '''facebook/esm2_t12_35M_UR50D''': 10_24,
}
def load_vocab_file(vocab_file ) -> list:
    """simple docstring"""
    with open(vocab_file, '''r''' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : List[str] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[str] = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<cls>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__="<eos>" , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : str = load_vocab_file(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = dict(enumerate(self.all_tokens ) )
lowercase : Tuple = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase : Tuple = unk_token
lowercase : Optional[Any] = cls_token
lowercase : Union[str, Any] = pad_token
lowercase : Dict = mask_token
lowercase : Dict = eos_token
lowercase : Any = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self._id_to_token.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self._token_to_id.get(SCREAMING_SNAKE_CASE__ , self._token_to_id.get(self.unk_token ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return text.split()
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=False ):
return len(self._id_to_token )
def __lowerCamelCase ( self ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self._token_to_id.get(SCREAMING_SNAKE_CASE__ , self._token_to_id.get(self.unk_token ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self._id_to_token.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : List[str] = [self.cls_token_id]
lowercase : Dict = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase : Tuple = [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
if token_ids_a is not None:
mask += [0] * len(SCREAMING_SNAKE_CASE__ ) + [1]
return mask
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __lowerCamelCase ( self ):
return self.get_vocab_size(with_added_tokens=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
return super()._add_tokens(SCREAMING_SNAKE_CASE__ , special_tokens=SCREAMING_SNAKE_CASE__ )
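# Hedged layout note for the sequence-builder above (upstream name
# `build_inputs_with_special_tokens`): ESM has no <sep> token, so a single
# sequence is rendered as [<cls>, A..., <eos>] and a pair as
# [<cls>, A..., <eos>, B..., <eos>]. Illustrative token ids only:
#
#     tok.build_inputs_with_special_tokens([5, 6])        # -> [cls, 5, 6, eos]
#     tok.build_inputs_with_special_tokens([5, 6], [7])   # -> [cls, 5, 6, eos, 7, eos]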
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( x: int ):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
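

# A minimal standalone sketch (added) of how `evaluate` composes with a tool
# dict outside the test harness; `add_two` is the helper defined at the top of
# this file, and the pattern mirrors test_evaluate_subscript above.
def _evaluate_demo() -> None:
    state = {"x": 3}
    result = evaluate("test_list = [x, add_two(x)]\ntest_list[1]", {"add_two": add_two}, state=state)
    assert result == 5  # value of the last expression
    assert state["test_list"] == [3, 5]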
| 714
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
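

# A minimal inference sketch (added) mirroring the slow integration test
# above; it assumes TF is installed and the same public checkpoint.
def _masked_lm_demo() -> None:
    model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
    logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(logits.shape)  # (1, 6, 33): (batch, seq_len, vocab_size)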
| 22
| 0
|
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
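

# A slightly hardened variant (added sketch, not part of the original script):
# fail loudly on HTTP errors and set a timeout before parsing the JSON body.
def fetch_bbc_news_checked(bbc_news_api_key: str) -> None:
    response = requests.get(_NEWS_API + bbc_news_api_key, timeout=10)
    response.raise_for_status()
    for i, article in enumerate(response.json()["articles"], 1):
        print(f"{i}.) {article['title']}")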
| 352
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # PPL is the exponentiated mean NLL over the attended (non-padding) positions.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 352
| 1
|
'''simple docstring'''
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
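

# Non-interactive usage sketch (added): the same routine on a hard-coded
# 3-vertex graph instead of the stdin-driven demo above. floyd_warshall also
# prints the matrix via _print_dist before returning it.
def _example() -> None:
    INF = float("inf")
    example_graph = [
        [0.0, 3.0, INF],
        [INF, 0.0, 1.0],
        [INF, INF, 0.0],
    ]
    dist, _ = floyd_warshall(example_graph, 3)
    assert dist[0][2] == 4.0  # 0 -> 2 is relaxed through vertex 1: 3 + 1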
| 687
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
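

# A toy walk-through (added) of the BPE machinery above, using an invented
# two-rule merge table rather than a real bpe.codes file.
def _bpe_pairs_demo() -> None:
    word = ("l", "o", "w", "e", "r</w>")
    assert get_pairs(word) == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}
    # With ranks {("l", "o"): 0, ("lo", "w"): 1}, bpe() would first merge
    # ("l", "o") into "lo", then ("lo", "w") into "low", and stop once no
    # remaining adjacent pair has an entry in bpe_ranks.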
| 687
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
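

# A plain-list sketch (added) of the token_type_ids arithmetic exercised in
# test_token_type_ids above: a prefix of length p (plus its leading marker)
# is flagged 1, the generated text (plus one separator) is flagged 0.
def _prefix_mask_demo() -> None:
    len_prefix, len_text = 3, 4
    mask = [1] + [1] * len_prefix + [0] * (len_text + 1)
    assert mask == [1, 1, 1, 1, 0, 0, 0, 0, 0]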
| 516
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
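

# A toy-shape sketch (added) of the past_key_values layout that
# generate_dummy_inputs builds above: BLOOM fuses batch and heads, storing
# keys transposed relative to values. Sizes here are illustrative only.
def _bloom_past_shapes_demo() -> None:
    import torch

    batch, n_head, hidden_size, past_len = 2, 8, 64, 5
    head_dim = hidden_size // n_head
    past_key = torch.zeros(batch * n_head, head_dim, past_len)
    past_value = torch.zeros(batch * n_head, past_len, head_dim)
    print(past_key.shape, past_value.shape)  # (16, 8, 5) and (16, 5, 8)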
| 516
| 1
|
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
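

# An iterative alternative (added sketch, not part of the original file):
# exponentiation by squaring, O(log n) multiplications instead of the O(n)
# recursion above, with the negative-exponent case folded in.
def fast_power(base: float, exponent: int) -> float:
    result = 1.0
    exp = abs(exponent)
    while exp:
        if exp & 1:
            result *= base
        base *= base
        exp >>= 1
    return result if exponent >= 0 else 1 / result


assert fast_power(2, 10) == 1024
assert fast_power(2, -2) == 0.25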
| 716
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
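
    # Quick numeric check (added sketch): sigmoid(0) is exactly 0.5, so
    # swish(0) is 0, and swish(x) approaches x for large positive x.
    values = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(values))  # [0.26894142 0.5        0.73105858]
    print(swish(values))    # [-0.26894142  0.          0.73105858]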
| 554
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 368
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
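
    # Sanity check (added sketch): for the straight line y = x from 0 to 1,
    # every chord lies on the curve, so the polyline sum should equal sqrt(2)
    # up to floating-point rounding.
    approx = line_length(lambda x: x, 0, 1, 100)
    print(approx, math.sqrt(2))  # both ~1.4142135...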
| 31
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
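
# Minimal usage sketch (not part of the test suite): the two call modes the
# tests above exercise. The dummy 16 kHz noise waveform is an illustrative
# assumption; any float32 array of audio samples works the same way.
if __name__ == "__main__":
    extractor = SpeechT5FeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)  # one second of noise
    # waveform input -> (batch, num_samples) float values
    print(extractor(waveform, sampling_rate=16000, return_tensors="np").input_values.shape)
    # spectrogram target -> (batch, num_frames, num_mel_bins)
    print(extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np").input_values.shape)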
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Reads a `pyspark.sql.DataFrame` into a `datasets.Dataset`."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
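
# Hypothetical usage sketch (not part of the module): reading a Spark DataFrame
# through this reader. Assumes a local `pyspark` installation; the column names
# and values below are made up for illustration.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([("hello", 0), ("world", 1)], schema=["text", "label"])
    # streaming=False materializes the data through the cache before reading
    ds = SparkDatasetReader(df, streaming=False).read()
    print(ds)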
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the amino-acid sequence alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a `Protein` from a model prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
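
# Input format note (inferred from the `line.split(";")` in `main` above, not
# from any docs): each line of `--correct_filename` must hold four
# semicolon-separated fields, `file;class_name;test_name;correct_line`, e.g.
# (hypothetical path and values):
#
#   tests/models/t5/test_modeling_t5.py;T5ModelTest;test_logits;expected_slice = torch.tensor([0.1, 0.2])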
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
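
# Usage sketch (not part of the builder): end users normally reach this code
# through `load_dataset`. The file names below are stand-ins.
if __name__ == "__main__":
    from datasets import load_dataset

    # one JSON object per line:
    ds = load_dataset("json", data_files="my_records.jsonl", split="train")
    # a single JSON document whose "data" field holds the records:
    ds = load_dataset("json", data_files="my_dump.json", field="data", split="train")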
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]

    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
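
# Sanity check (illustrative addition): because both operands are zero-filled
# to a common width and Python's `bin` never emits leading zeros, the result
# always matches the built-in operator via `bin(a | b)`.
if __name__ == "__main__":
    for x, y in [(25, 32), (37, 100), (0, 9)]:
        assert binary_or(x, y) == bin(x | y)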
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
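
# Usage sketch (not part of the module), following the documented pattern for
# the released checkpoints; downloading "facebook/m2m100_418M" is required.
if __name__ == "__main__":
    from transformers import M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model_inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    # `forced_bos_token_id=tokenizer.get_lang_id("fr")` is then passed to
    # `model.generate` so decoding starts with the target-language token.
    print(model_inputs.input_ids)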
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 189
|
"""Compute the resonant frequency of an LC circuit."""
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
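# Illustrative check (added, not part of the original module): for an assumed
# 10 mH inductor and 5 uF capacitor, f = 1 / (2 * pi * sqrt(L * C)):
#
#     >>> resonant_frequency(inductance=0.01, capacitance=5e-6)
#     ('Resonant frequency', 711.7625...)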
| 189
| 1
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Fetch the current stock price for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
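# Note (added): this scraper is tied to Yahoo Finance's current markup; the CSS
# class string above is brittle and can break whenever the page layout changes.
# A minimal defensive variant, assuming the same `soup` object as above:
#
#     tag = soup.find("div", class_=class_)
#     price = tag.find("span").text if tag is not None else "N/A"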
| 719
|
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing an empty vector should not raise

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))

    def test_add_matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
| 429
| 0
|
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    """Perform a number of sanity checks on the arguments before starting training."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
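# Illustrative invocation (added): the flag names come from the parser above,
# but the paths and loss-weight values are placeholders, not from the original
# script or its README.
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 \
#       --dump_path serialization_dir/my_first_distillation \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --force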
| 37
|
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes: {mlfq.calculate_sequence_of_finish_queue()}")
| 668
| 0
|
"""Calculate any one of voltage, current, or power in an electrical system."""
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given two of the three quantities (the third passed as 0), compute the missing one."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
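# Illustrative usage (added): exactly one of the three arguments must be 0.
#
#     >>> electric_power(voltage=0, current=2, power=5)
#     result(name='voltage', value=2.5)
#     >>> electric_power(voltage=2, current=4, power=0)
#     result(name='power', value=8.0)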
| 718
|
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 43
| 0
|
"""Convert a native T5X (UMT5/MT5) checkpoint into a PyTorch checkpoint."""
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from t5x checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 369
|
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 369
| 1
|
def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small demo graphs, selected by `index`."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the list of bridges [(a1, b1), ..., (ak, bk)] of an undirected graph, with ai <= bi."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
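# Illustrative usage (added): on the first demo graph returned by
# `get_demo_graph(0)`, the bridges are (2, 3), (3, 4) and (2, 5); the order of
# the returned pairs depends on the DFS traversal.
#
#     print(compute_bridges(get_demo_graph(0)))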
| 716
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
| 184
| 0
|
class Node:
    def __init__(self, name, val) -> None:
        self.name = name
        self.val = val

    def __str__(self) -> str:
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other) -> bool:
        return self.val < other.val


class MinHeap:
    """Min-heap keyed on Node.val, with an index map enabling decrease-key in O(log n)."""

    def __init__(self, array) -> None:
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx: int) -> int:
        return (idx - 1) // 2

    def get_left_child_idx(self, idx: int) -> int:
        return idx * 2 + 1

    def get_right_child_idx(self, idx: int) -> int:
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array) -> None:
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx) -> None:
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node) -> None:
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self) -> bool:
        return len(self.heap) == 0

    def decrease_key(self, node, new_value) -> None:
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 20
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 694
| 0
|
from __future__ import annotations


def all_unique(collection: list) -> bool:
    """Return True if every element of `collection` appears only once."""
    return len(set(collection)) == len(collection)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
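# Illustrative usage (added):
#
#     >>> all_unique([1, 2, 3])
#     True
#     >>> all_unique(["a", "b", "a"])
#     False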
| 81
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ConvNextV2 does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # skip model classes that do not compute a loss
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
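
# A minimal sketch (illustrative, not part of the test suite) of why the checks
# above expect image_size // 4 after the stem and image_size // 32 at the last
# stage: the ConvNeXt patchify stem downsamples by 4 and each later stage halves
# the spatial resolution.
def _demo_stage_resolutions(image_size: int = 224, num_stages: int = 4):
    resolutions = [image_size // 4 // (2**i) for i in range(num_stages)]
    return resolutions  # e.g. [56, 28, 14, 7] for a 224px input; 224 // 32 == 7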
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the category,
    so that its weights can be loaded with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        """
        Args:
            logits: bsz, seqlen, vocab_size
            labels: bsz, seqlen
        """
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
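
# A minimal sketch (toy shapes, not from the training script) of what the
# `cross_entropy` above computes: one-hot targets against log-softmax scores,
# reduced by the callable passed as `reduction`.
def _demo_cross_entropy():
    logits = jnp.array([[2.0, 0.5, -1.0]])  # (batch=1, num_classes=3)
    labels = jnp.array([0])
    one_hot = (labels[..., None] == jnp.arange(3)[None]).astype("f4")
    loss = -jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
    return jnp.mean(loss)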
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        # right-pad to max_length and build the matching attention mask
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # exclude biases and LayerNorm scales from weight decay
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
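
# A minimal sketch (toy step counts, not the script's real hyperparameters)
# showing the shape of the joined schedule built above: linear warmup up to
# `warmup_steps`, then linear decay towards ~0.
def _demo_schedule():
    lr = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=10, num_train_steps=100)
    return [float(lr(step)) for step in (0, 5, 10, 50, 99)]  # rises, peaks at step 10, decays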
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: deque-based shortest path for graphs whose edge weights are 0 or 1."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
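
# A small usage sketch (graph values are illustrative, not from the module):
# edges 0 -[0]-> 1 -[1]-> 2 plus a direct 0 -[1]-> 2; the 0-1 BFS finds cost 1.
def _demo_zero_one_bfs():
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 2, 1)
    return graph.get_shortest_path(0, 2)  # -> 1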
'''simple docstring'''
def binary_xor(a: int, b: int) -> str:
    """Return the binary XOR of two positive integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
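
# Usage sketch: 25 = 0b011001 and 32 = 0b100000 once zero-filled to the same
# width, so their XOR is 0b111001 (= 57).
def _demo_binary_xor():
    return binary_xor(25, 32)  # -> "0b111001"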
'''simple docstring'''
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    """Return the digit sum of the numerator of the ``max_n``-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
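
# Context sketch: the continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...],
# so every third partial quotient (the `i % 3 == 0` branch above) equals 2 * i // 3
# and the rest are 1. The first convergents are 2, 3, 8/3, 11/4, 19/7, 87/32, ...
def _demo_small_convergent():
    return solution(10)  # numerator of the 10th convergent is 1457 -> digit sum 17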
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    segmentation_map = Image.open(dataset[1]["file"])

    return image, segmentation_map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
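
# A minimal sketch (not part of the test suite) of what `do_reduce_labels` does
# in BEiT-style preprocessing: the background class 0 is mapped to the ignore
# index 255 and all remaining class ids are shifted down by one.
def _demo_reduce_labels():
    label = np.array([0, 1, 2, 150])
    label[label == 0] = 255
    label = label - 1
    label[label == 254] = 255
    return label  # -> array([255, 0, 1, 149])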
import copy
import os
import cv2 as cva  # OpenCV, aliased to keep the call sites below unchanged
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_file):
        self.img = cva.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
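
# A compact numpy-only sketch (illustrative, not the class above) of the same
# cumulative-histogram mapping for an 8-bit grayscale image.
def _demo_equalize(img):
    hist, _ = np.histogram(img.ravel(), bins=256, range=(0, 256))
    cdf = hist.cumsum() / hist.sum()
    lut = np.round(255 * cdf).astype(np.uint8)  # level -> stretched level
    return lut[img]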
if __name__ == "__main__":
    # use the script's directory (dirname, not basename) to locate the input image
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
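
# Usage sketch (proxy address is a made-up example): `copy()` deep-copies every
# field, so mutating the clone's mutable members leaves the original untouched.
def _demo_copy():
    original = DownloadConfig(proxies={"https": "proxy.example:3128"})
    clone = original.copy()
    clone.proxies["https"] = "other.example:3128"
    return original.proxies["https"]  # still "proxy.example:3128"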
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def lowerCamelCase__ ( lowercase="no" , lowercase = default_json_config_file , lowercase = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = Path(lowercase )
path.parent.mkdir(parents=lowercase , exist_ok=lowercase )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
SCREAMING_SNAKE_CASE : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.device_count()
SCREAMING_SNAKE_CASE : int = num_gpus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_gpus > 1:
SCREAMING_SNAKE_CASE : Tuple = "MULTI_GPU"
else:
SCREAMING_SNAKE_CASE : Optional[Any] = "NO"
elif is_xpu_available() and use_xpu:
SCREAMING_SNAKE_CASE : List[str] = torch.xpu.device_count()
SCREAMING_SNAKE_CASE : str = num_xpus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_xpus > 1:
SCREAMING_SNAKE_CASE : Any = "MULTI_XPU"
else:
SCREAMING_SNAKE_CASE : str = "NO"
elif is_npu_available():
SCREAMING_SNAKE_CASE : List[Any] = torch.npu.device_count()
SCREAMING_SNAKE_CASE : Optional[Any] = num_npus
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if num_npus > 1:
SCREAMING_SNAKE_CASE : str = "MULTI_NPU"
else:
SCREAMING_SNAKE_CASE : int = "NO"
else:
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : int = "NO"
SCREAMING_SNAKE_CASE : Dict = ClusterConfig(**lowercase )
config.to_json_file(lowercase )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
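
# Usage sketch (the path is an assumption for illustration): generate a minimal
# single-machine config without walking through the interactive questionnaire.
def _demo_write_config():
    return write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_default_config.json")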
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types)}""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        # normalize every edge to (min, max) so (a, b) and (b, a) are the same key
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Any = f.read().strip().split('''\n''')
__a : Union[str, Any] = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
if adjaceny_matrix[edgea][edgea] != "-":
__a : int = int(adjaceny_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
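
# A small usage sketch (toy weights, not the Project Euler data): in the
# triangle 0-1-2, Prim's algorithm keeps the two cheapest edges, saving the
# weight of the heaviest one.
def _demo_prims():
    graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    mst = graph.prims_algorithm()
    return sum(graph.edges.values()) - sum(mst.edges.values())  # -> 3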
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
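
# Usage sketch with made-up menu data: sort by value/weight density
# (`Things.value_weight`) and greedily fill up to the cost cap.
def _demo_greedy():
    menu = build_menu(["burger", "salad", "soda"], [80, 60, 20], [40, 10, 10])
    chosen, total_value = greedy(menu, 50.0, Things.value_weight)
    return [thing.get_name() for thing in chosen], total_value  # (["salad", "burger"], 140.0)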
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental Sieve of Eratosthenes: lazily yields one prime after another."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which the prime-square remainder 2 * p_n * n exceeds ``limit``."""
    primes = sieve()

    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
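
# Math note (Project Euler 123): for odd n, ((p_n - 1)^n + (p_n + 1)^n) mod p_n^2
# equals 2 * n * p_n, which is why the loop only tests odd n and compares
# 2 * prime * n against the limit. A tiny check of that identity:
def _demo_remainder_identity(p: int = 7, n: int = 3):
    return ((p - 1) ** n + (p + 1) ** n) % (p * p) == (2 * n * p) % (p * p)  # -> True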
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode ``data`` to an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back to bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
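
# Usage sketch: round-tripping a byte string through the helpers above.
def _demo_base16():
    encoded = base16_encode(b"HF")  # -> "4846" (0x48 = 'H', 0x46 = 'F')
    return base16_decode(encoded)  # -> b"HF"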
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''pixel_values'''] = images
        return encoded_inputs
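    # When `return_overflowing_tokens=True`, one document can yield several tokenized
    # windows; the helper below duplicates the source image for each window so that
    # `input_ids` and `pixel_values` stay aligned one-to-one.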
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """simple docstring"""
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                F""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 464
| 1
|
'''simple docstring'''
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = "base_with_context"
def load_notes_encoder( weights , model ):
"""simple docstring"""
lowercase__ = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        attention_weights = ly_weight['''attention''']
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
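# Note: in these loaders, each Flax `kernel` is stored input-major, so every weight is
# transposed with `.T` before being wrapped in nn.Parameter for the PyTorch module.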
def load_continuous_encoder( weights , model ):
"""simple docstring"""
lowercase__ = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
        attention_weights = ly_weight['''attention''']
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder( weights , model ):
"""simple docstring"""
lowercase__ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=lowercase )
lowercase__ = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight['''self_attention''']
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main( args ):
    """simple docstring"""
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jax.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
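    # The overrides above force classifier-free guidance on when re-parsing the
    # training gin config that sits next to the checkpoint.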
    gin_file = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['''target''']['''decoder'''] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
_lowerCAmelCase = parser.parse_args()
main(args)
| 161
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_KWARGS_DESCRIPTION = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the CUAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\n    'aupr': Area Under the Precision-Recall curve\n    'prec_at_80_recall': Precision at 80% recall\n    'prec_at_90_recall': Precision at 90% recall\nExamples:\n    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> cuad_metric = datasets.load_metric(\"cuad\")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD ( datasets.Metric ):
"""simple docstring"""
    def _info( self )-> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
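    # The official CUAD scoring script is SQuAD-style: references must be re-wrapped
    # into a nested dataset of paragraphs -> qas -> answers, which `_compute` builds
    # below before delegating to `evaluate`.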
    def _compute( self , predictions , references )-> Optional[int]:
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 161
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """simple docstring"""
    def __init__(self , row , column , default_value=0 ) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__(self ) -> str:
        s = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'''%{max_element_length}s'''
        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__(self ) -> str:
return str(self )
    def validate_indices(self , loc ) -> bool:
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self , loc ) -> Any:
        assert self.validate_indices(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__(self , loc , value ) -> None:
        assert self.validate_indices(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__(self , another ) -> "Matrix":
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self ) -> "Matrix":
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self , another ) -> "Matrix":
        return self + (-another)
    def __mul__(self , another ) -> "Matrix":
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )
    def transpose(self ) -> "Matrix":
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self , u , v ) -> Any:
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
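    # Sherman-Morrison identity: if this matrix already holds A^(-1), then
    # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # which is exactly what the method above computes in O(n^2) instead of a
    # fresh O(n^3) inversion.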
# Testing
if __name__ == "__main__":
    def testa( ):
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''' )
        print(f'''v is {v}''' )
        print(f'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
    def run_doctests( ):
        import doctest
        doctest.testmod()
    testa()
| 708
|
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seqaseq = False
    def setUp (self ) -> None:
        super().setUp()
        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer (self , **kwargs ):
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts (self , tokenizer ):
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer (self ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower (self ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
    def test_full_tokenizer_no_lower (self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_full_tokenizer_moses_numbers (self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
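    # The '@-@', '@,@' and '@.@' tokens above are Moses/wikitext-style markers that keep
    # intra-word hyphens and digit separators reversible for detokenization.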
    def test_move_added_token (self ):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 438
| 0
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seqaseq = False
    def setUp( self) -> None:
        super().setUp()
        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer( self , **kwargs):
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer):
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True)
        tokens = tokenizer.tokenize('<unk> UNwanted , running')
        self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [0, 4, 8, 7])
    def test_full_tokenizer_lower( self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  ') , ['hello', '!', 'how', 'are', 'you', '?'])
    def test_full_tokenizer_no_lower( self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_full_tokenizer_moses_numbers( self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in) , tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out) , text_in)
    def test_move_added_token( self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
tokenizer.add_tokens(['new1', 'new2'])
tokenizer.move_added_token('new1' , 1)
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer) , original_len + 2)
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1') , [1])
self.assertEqual(tokenizer.decode([1]) , 'new1')
| 313
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
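# Only the two attention masks are returned: padding positions are masked out, and the
# decoder entry reuses the encoder attention_mask built above.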
class FlaxBlenderbotModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=3_2 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict):
        max_decoder_length = 2_0
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4')
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}")
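    # The check above runs the decoder twice: once token-by-token with the KV cache and
    # once over the full sequence, then asserts the final-step logits agree to ~1e-3.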
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict):
        max_decoder_length = 2_0
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests( unittest.TestCase ):
    vocab_size = 99
    def _get_config_and_data( self):
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape)
    def test_lm_uneven_forward( self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
        summary = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
        outputs = lm_model(input_ids=context , decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape)
    def test_shift_tokens_right( self):
        input_ids = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
        shifted = shift_tokens_right(input_ids , 1 , 2)
        n_pad_before = np.equal(input_ids , 1).astype(np.floataa).sum()
        n_pad_after = np.equal(shifted , 1).astype(np.floataa).sum()
        self.assertEqual(shifted.shape , input_ids.shape)
        self.assertEqual(n_pad_after , n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict)
    def test_use_cache_forward_with_attn_mask( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict)
    def test_encode( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask)
                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs) , len(outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    def test_decode( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'])
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs) , len(outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    @slow
    def test_model_from_pretrained( self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.')
    @slow
    def test_generation_from_short_input_same_as_parlai_3B( self):
        FASTER_GEN_KWARGS = {'num_beams': 1, 'early_stopping': True, 'min_length': 1_5, 'max_length': 2_5}
        TOK_DECODE_KW = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
        src_text = ['Sam']
        model_inputs = tokenizer(src_text , return_tensors='jax')
        generated_utterances = model.generate(**model_inputs , **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances , **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 313
| 1
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc( model_doc ):
    """simple docstring"""
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add non-duplicated keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )
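# check_model_doc() below applies the cleaner above to every modality section of the
# "Models" part of the API table of contents, rewriting the YAML in place only when
# asked to overwrite.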
def check_model_doc( overwrite=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 654
|
'''simple docstring'''
def counting_sort( collection ):
    """simple docstring"""
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
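# Counting sort runs in O(n + k) time and O(n + k) extra space, where k is the value
# range (coll_max - coll_min + 1); it is only practical when k is comparable to n.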
def counting_sort_string( string ):
    """simple docstring"""
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig ):
    model_type = "fnet"
    def __init__(self , vocab_size=32000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 581
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig( PretrainedConfig ):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
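# ONNX export note: ImageGPT consumes raw `input_ids` (color-cluster indices), so the
# dummy-input generator below renders images with the preprocessor instead of tokenizing text.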
class ImageGPTOnnxConfig( OnnxConfig ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
            ] )
    def generate_dummy_inputs(self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ) -> Mapping[str, Any]:
        '''simple docstring'''
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
| 581
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    """simple docstring"""
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,'tf_padding' ) )
        self.parent.assertTrue(hasattr(config ,'depth_multiplier' ) )
class MobileNetVaModelTester:
    """simple docstring"""
    def __init__( self ,parent ,batch_size=13 ,num_channels=3 ,image_size=32 ,depth_multiplier=0.25 ,depth_divisible_by=8 ,min_depth=8 ,expand_ratio=6 ,output_stride=32 ,first_layer_is_expansion=True ,finegrained_output=True ,tf_padding=True ,hidden_act="relu6" ,last_hidden_size=1280 ,classifier_dropout_prob=0.1 ,initializer_range=0.02 ,is_training=True ,use_labels=True ,num_labels=10 ,scope=None ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,depth_divisible_by=self.depth_divisible_by ,min_depth=self.min_depth ,expand_ratio=self.expand_ratio ,output_stride=self.output_stride ,first_layer_is_expansion=self.first_layer_is_expansion ,finegrained_output=self.finegrained_output ,hidden_act=self.hidden_act ,tf_padding=self.tf_padding ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ,pixel_labels ):
        '''simple docstring'''
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
self.parent.assertEqual(
result.pooler_output.shape ,(self.batch_size, self.last_hidden_size) ,)
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ,pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self ,config ,pixel_values ,labels ,pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
        result = model(pixel_values ,labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileNetVaModel,
            '''image-classification''': MobileNetVaForImageClassification,
            '''image-segmentation''': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self ,config_class=MobileNetVaConfig ,has_text_modality=False )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='MobileNetV2 does not output attentions' )
    def test_attention_outputs( self ):
        '''simple docstring'''
        pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) ,expected_num_stages )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[int] = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_a : Optional[Any] = model.to(_a )
_a : Tuple = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_a : Union[str, Any] = prepare_img()
_a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : str = model(**_a )
_a : Optional[Any] = outputs.logits
# verify the logits
_a : List[Any] = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape ,_a )
_a : Union[str, Any] = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] ,device=_a ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_a ,atol=1E-4 ) )
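if __name__ == "__main__":
    # A minimal standalone sketch of the segmentation inference flow exercised by the
    # slow test above, assuming the same public checkpoint and the repo's fixture
    # image. The final `argmax` post-processing step is illustrative and not part of
    # the original test.
    image = prepare_img()
    image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
    model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, 21, 65, 65): batch, classes, height, width
    segmentation_map = logits.argmax(dim=1)  # per-pixel class ids, shape (1, 65, 65)
    print(segmentation_map.shape)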
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
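# Usage sketch (a minimal example assuming the public checkpoints used in this
# repo's tests; not an exhaustive reference for the pipeline API):
#
#     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#     )
#     image = pipe("bird", image=canny_image).images[0]  # `canny_image`: a prepared control image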
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
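# Usage sketch: the default arguments reproduce the facebook/xlm-roberta-xl
# architecture (36 layers, hidden size 2560); the reduced-depth variant below is
# an assumed override shown only for illustration.
#
#     config = XLMRobertaXLConfig()
#     small_config = XLMRobertaXLConfig(num_hidden_layers=6)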
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
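# Usage sketch: the defaults reproduce facebook/vit-mae-base. `mask_ratio=0.75`
# means 75% of image patches are masked during pre-training; the override below
# is illustrative only.
#
#     config = ViTMAEConfig()
#     aggressive_masking = ViTMAEConfig(mask_ratio=0.9)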
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model: it additionally needs MLM and NSP labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
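# Standalone sketch of the first integration check above (assumes the same
# public checkpoint; the raw token ids stand in for real tokenizer output):
#
#     model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
#     input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
#     attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
#     with torch.no_grad():
#         last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]  # (1, 6, 768)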
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
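# Invocation sketch (assumptions: the script is saved as `log_reports.py`, and the
# working directory contains pytest `--report-log` output as *.log files):
#
#     TEST_TYPE=cuda SLACK_API_TOKEN=... GITHUB_REPOSITORY=huggingface/accelerate \
#         GITHUB_RUN_ID=... python log_reports.py
#
# Without TEST_TYPE set, the script only prints the summary instead of posting
# the report to Slack.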
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
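# Note on the `replicate`/`shard` pattern used above: with `jit=True` the
# pipeline call runs under `jax.pmap`, so the model parameters are replicated on
# every device while the prompt ids and control images are split along the
# leading device axis; each device then denoises its own sample in parallel,
# which is why the raw output batch has shape (jax.device_count(), 1, 768, 512, 3).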
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify the inputs
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there is an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
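# Usage sketch (checkpoint names are illustrative assumptions; the image
# processor's default apply_ocr=True means words and boxes come from built-in
# OCR rather than from the caller):
#
#     from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizer
#
#     image_processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
#     tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")
#     processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)
#     encoding = processor(document_image, return_tensors="pt")
#     # -> keys: input_ids, bbox, attention_mask, image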
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
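# The `_LazyModule` indirection keeps `import transformers` cheap: submodules
# listed in `_import_structure` are only imported when one of their attributes
# is first accessed. Usage sketch ("laion/clap-htsat-unfused" is a public CLAP
# checkpoint, named here for illustration):
#
#     from transformers import ClapModel, ClapProcessor
#
#     model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")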
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : Dict = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = "deformable_detr"
lowerCamelCase__ : List[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , a=True , a=None , a=3 , a=3_0_0 , a=1_0_2_4 , a=6 , a=1_0_2_4 , a=8 , a=6 , a=1_0_2_4 , a=8 , a=0.0 , a=True , a="relu" , a=2_5_6 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=1.0 , a=True , a=False , a="sine" , a="resnet50" , a=True , a=False , a=4 , a=4 , a=4 , a=False , a=3_0_0 , a=False , a=1 , a=5 , a=2 , a=1 , a=1 , a=5 , a=2 , a=0.1 , a=0.25 , a=False , **a , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowercase__ : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(a , a ):
lowercase__ : Union[str, Any] = backbone_config.get('model_type' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : Union[str, Any] = config_class.from_dict(a )
lowercase__ : List[Any] = use_timm_backbone
lowercase__ : int = backbone_config
lowercase__ : Optional[int] = num_channels
lowercase__ : Optional[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = d_model
lowercase__ : List[str] = encoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : List[Any] = encoder_attention_heads
lowercase__ : str = decoder_ffn_dim
lowercase__ : Optional[Any] = decoder_layers
lowercase__ : List[Any] = decoder_attention_heads
lowercase__ : Optional[Any] = dropout
lowercase__ : int = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Tuple = activation_function
lowercase__ : Optional[int] = init_std
lowercase__ : List[str] = init_xavier_std
lowercase__ : Tuple = encoder_layerdrop
lowercase__ : List[str] = auxiliary_loss
lowercase__ : Tuple = position_embedding_type
lowercase__ : Tuple = backbone
lowercase__ : List[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[Any] = encoder_n_points
lowercase__ : Union[str, Any] = decoder_n_points
lowercase__ : List[Any] = two_stage
lowercase__ : str = two_stage_num_proposals
lowercase__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowercase__ : int = class_cost
lowercase__ : List[Any] = bbox_cost
lowercase__ : Optional[Any] = giou_cost
# Loss coefficients
lowercase__ : List[Any] = mask_loss_coefficient
lowercase__ : List[Any] = dice_loss_coefficient
lowercase__ : Tuple = bbox_loss_coefficient
lowercase__ : int = giou_loss_coefficient
lowercase__ : Any = eos_coefficient
lowercase__ : str = focal_alpha
lowercase__ : Any = disable_custom_kernels
super().__init__(is_encoder_decoder=a , **a )
@property
def _UpperCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def _UpperCAmelCase ( self ) -> int:
return self.d_model
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : Optional[Any] = self.backbone_config.to_dict()
lowercase__ : Optional[Any] = self.__class__.model_type
return output
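if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module):
    # `attribute_map` lets generic code read `hidden_size` even though the
    # value is stored under `d_model`. Note that `two_stage=True` requires
    # `with_box_refine=True`, as enforced in `__init__`.
    config = DeformableDetrConfig(d_model=512, two_stage=True, with_box_refine=True)
    assert config.hidden_size == 512
    assert config.num_attention_heads == config.encoder_attention_heads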
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
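if __name__ == "__main__":
    # Manual smoke test (illustrative, not part of the original test file):
    # resize the shortest edge to 20 pixels, then center-crop to 18x18.
    from PIL import Image as PILImage

    processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    dummy = PILImage.new("RGB", (40, 30))
    print(processor(dummy, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])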
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
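if __name__ == "__main__":
    # Usage sketch (illustrative): this builder is what backs
    # `load_dataset("csv", ...)`. The file name below is a hypothetical
    # placeholder for a local CSV file.
    from datasets import load_dataset

    dataset = load_dataset("csv", data_files="my_data.csv", sep=",")
    print(dataset["train"].features)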
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the model's weights to the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
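    # Example invocation (illustrative; all paths below are hypothetical
    # placeholders):
    #
    #   python convert_sew_checkpoint.py \
    #       --checkpoint_path /path/to/sew_checkpoint.pt \
    #       --dict_path /path/to/dict.ltr.txt \
    #       --pytorch_dump_folder_path ./sew-hf \
    #       --is_finetuned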
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
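if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): preprocess
    # a dummy image with CLIP's default resize/crop/rescale/normalize pipeline.
    from PIL import Image

    image_processor = CLIPImageProcessor()
    dummy = Image.new("RGB", (640, 480))
    batch = image_processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)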
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
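if __name__ == "__main__":
    # Illustrative sketch (not part of the original tests): a BlipProcessor
    # wraps an image processor and a tokenizer behind a single __call__.
    processor = BlipProcessor(
        image_processor=BlipImageProcessor(),
        tokenizer=BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
    )
    image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
    inputs = processor(text="a photo", images=image)
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']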
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase__ ="platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/pegasus-large', from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')
        tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')

        src_text = [
            ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
            ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
        ]

        tgt_text = [
            '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
            '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
        ]

        inputs = tokenizer(src_text, return_tensors='np', truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
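# To run this module's slow integration tests locally (illustrative command;
# RUN_SLOW is the standard switch used by the transformers test suite, and the
# file path is the conventional repo location, which may differ):
#
#   RUN_SLOW=1 python -m pytest tests/models/pegasus/test_modeling_flax_pegasus.py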
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__( __magic_name__ , __magic_name__ , unittest.TestCase ):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
    def test_attention_outputs(self):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def test_inputs_embeds(self):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def test_model_common_attributes(self):
"""simple docstring"""
pass
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    def test_hidden_states_output(self):
        """simple docstring"""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def test_feed_forward_chunking(self):
"""simple docstring"""
pass
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A__( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
"""simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
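        # The slice comparison above is the standard integration-test pattern: a handful
        # of reference logits recorded from a known-good run are checked within a small
        # absolute tolerance, which stays robust to benign numerical drift across devices.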
@require_torch
class A__( __magic_name__ , unittest.TestCase ):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = BitModelTester(self)
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ] )
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward( self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states)
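# Toy illustration of the residual blend in `forward` above (not part of the original
# module): each branch contributes its residual, mixed by `mix_ratio`, and the input
# is added back at the end.
#
#   import torch
#   input_states = torch.randn(2, 4, 8)                              # stand-in hidden states
#   residuals = [torch.randn_like(input_states) for _ in range(2)]   # one residual per branch
#   mix_ratio = 0.5
#   out = residuals[0] * mix_ratio + residuals[1] * (1 - mix_ratio) + input_states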
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return one longest non-decreasing subsequence of `array` (naive recursion)."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
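# Hand-traced example (illustration): longest_subsequence([5, 1, 2, 3]) returns
# [1, 2, 3] -- the branch that drops the pivot 5 and recurses on [1, 2, 3] beats
# the branch that keeps the pivot, which can only produce [5].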
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing them in a grid (height given
    by the key) in a zigzag formation and reading it row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key, fills it with the characters of the
    input string, and then reads it back in a zigzag formation."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
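# Worked example (illustration): encrypt("HELLOWORLD", 3) lays the text on three rails
#
#   H . . . O . . . L .        row 0: positions 0, 4, 8
#   . E . L . W . R . D        row 1: positions 1, 3, 5, 7, 9
#   . . L . . . O . . .        row 2: positions 2, 6
#
# and reading the rows top to bottom gives "HOLELWRDLO"; decrypt("HOLELWRDLO", 3)
# restores the original string.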
if __name__ == "__main__":
import doctest
doctest.testmod()
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return the given decimal as an (integer numerator, integer denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: after the loop, `divisor` holds gcd(numerator, denominator)
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
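# Worked example (illustration): 6.25 has two fractional digits, so it becomes
# 625 / 100; the Euclidean loop reduces by gcd(625, 100) = 25, giving (25, 4).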
if __name__ == "__main__":
print(F'{decimal_to_fraction(2) = }')
print(F'{decimal_to_fraction(8_9.0) = }')
print(F'{decimal_to_fraction("67") = }')
print(F'{decimal_to_fraction("45.0") = }')
print(F'{decimal_to_fraction(1.5) = }')
print(F'{decimal_to_fraction("6.25") = }')
print(F'{decimal_to_fraction("78td") = }')
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix counting up from 1."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (90 degrees clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
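# Quick sanity check (illustration): for [[1, 2], [3, 4]],
#   transpose        -> [[1, 3], [2, 4]]
#   then reverse_row -> [[2, 4], [1, 3]]
# which is indeed the 90-degree counterclockwise rotation.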
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """simple docstring"""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy())

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """simple docstring"""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True)
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", output, )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all", output, )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", output, )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", output, )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all", output, )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all", output, )
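    # For reference (illustration): the assembled invocation these tests assert on has
    # the shape
    #   Running gcloud compute tpus tpu-vm ssh test-tpu --zone us-central1-a \
    #       --command "cd /usr/share; <extra commands>" --worker all
    # i.e. the `gcloud` prefix + TPU name + zone + the joined command list + "--worker all".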
'''simple docstring'''
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element of the array, O(n^2)."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow(), but uses enumerate() and slicing
    instead of index arithmetic."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element in one right-to-left pass with a monotonic
    stack, O(n)."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
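# Worked trace (illustration): for [2, 7, 3, 5], scanning right to left keeps a stack
# of candidates that is popped while its top is <= the current element, yielding
# next_greatest_element([2, 7, 3, 5]) == [7, -1, 5, -1].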
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
UpperCamelCase__ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
UpperCamelCase__ : str = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
UpperCamelCase__ : Tuple = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
UpperCamelCase__ : Tuple = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
UpperCamelCase__ : Tuple = 'allenai'
def rewrite_dict_keys(d):
    '''simple docstring'''
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
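# e.g. (illustration; the input must contain all four special tokens, otherwise the
# restore loop raises KeyError):
#   rewrite_dict_keys({"le@@": 5, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
#   -> {"le": 5, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}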
def __UpperCamelCase( _A : Tuple , _A : List[Any] ):
'''simple docstring'''
# prep
assert os.path.exists(_A )
os.makedirs(_A , exist_ok=_A )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
UpperCAmelCase__ : Dict = basename(_A )
UpperCAmelCase__ : List[str] = dirname(_A )
UpperCAmelCase__ : Union[str, Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
UpperCAmelCase__ : Dict = cls.hub_models()
UpperCAmelCase__ : Dict = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
UpperCAmelCase__ : int = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
UpperCAmelCase__ : Union[str, Any] = hub_utils.from_pretrained(
_A , _A , _A , archive_map=_A , **_A )
UpperCAmelCase__ : List[Any] = vars(chkpt['''args''']['''model'''] )
UpperCAmelCase__ : str = args['''source_lang''']
UpperCAmelCase__ : Tuple = args['''target_lang''']
UpperCAmelCase__ : Optional[int] = dirname(_A )
UpperCAmelCase__ : int = basename(_A )
# dicts
UpperCAmelCase__ : List[Any] = os.path.join(_A , F'''dict.{src_lang}.txt''' )
UpperCAmelCase__ : Tuple = os.path.join(_A , F'''dict.{tgt_lang}.txt''' )
UpperCAmelCase__ : int = Dictionary.load(_A )
UpperCAmelCase__ : Optional[int] = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase__ : List[Any] = len(_A )
UpperCAmelCase__ : str = os.path.join(_A , '''vocab-src.json''' )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
UpperCAmelCase__ : Tuple = True
for k in src_vocab.keys():
if not k.islower():
UpperCAmelCase__ : int = False
break
UpperCAmelCase__ : str = Dictionary.load(_A )
UpperCAmelCase__ : Tuple = rewrite_dict_keys(tgt_dict.indices )
UpperCAmelCase__ : List[str] = len(_A )
UpperCAmelCase__ : Dict = os.path.join(_A , '''vocab-tgt.json''' )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A ) )
# merges_file (bpecodes)
UpperCAmelCase__ : Tuple = os.path.join(_A , VOCAB_FILES_NAMES['''merges_file'''] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
UpperCAmelCase__ : Dict = os.path.join(_A , _A )
if os.path.exists(_A ):
break
with open(_A , encoding='''utf-8''' ) as fin:
UpperCAmelCase__ : Optional[Any] = fin.read()
UpperCAmelCase__ : List[str] = re.sub(R''' \d+$''' , '''''' , _A , 0 , re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as fout:
fout.write(_A )
# model config
UpperCAmelCase__ : Optional[int] = os.path.join(_A , '''config.json''' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args['tokenizer']}'''
UpperCAmelCase__ : Optional[Any] = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.0_2,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
UpperCAmelCase__ : Optional[int] = 5
UpperCAmelCase__ : Optional[Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
UpperCAmelCase__ : str = best_score_hparams[model_dir]['''length_penalty''']
else:
UpperCAmelCase__ : Dict = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A ) )
# tokenizer config
UpperCAmelCase__ : List[str] = os.path.join(_A , _A )
UpperCAmelCase__ : Any = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 10_24,
'''do_lower_case''': do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A ) )
# model
UpperCAmelCase__ : Dict = chkpt['''models'''][0]
UpperCAmelCase__ : Any = model.state_dict()
# rename keys to start with 'model.'
UpperCAmelCase__ : Tuple = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
UpperCAmelCase__ : Union[str, Any] = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(_A , _A )
UpperCAmelCase__ : List[str] = FSMTConfig.from_pretrained(_A )
UpperCAmelCase__ : int = FSMTForConditionalGeneration(_A )
# check that it loads ok
model_new.load_state_dict(_A , strict=_A )
# save
UpperCAmelCase__ : List[str] = os.path.join(_A , _A )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(_A , _A )
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase__ : List[str] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental (emits a UserWarning on call).
    Name and warning category reconstructed; the obfuscated original only kept the message."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
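# Usage sketch (illustration):
#   @experimental
#   def new_api():
#       ...
#   new_api()  # warns: "'new_api' is experimental and might be subject to breaking changes in the future."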
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None, ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None, ):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
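# Minimal usage sketch (illustration):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=3e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
#   )
#   model.compile(optimizer=optimizer, ...)  # `model` here is any tf.keras.Model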
class AdamWeightDecay(Adam):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE = 0.0_01 , __SCREAMING_SNAKE_CASE = 0.9 , __SCREAMING_SNAKE_CASE = 0.9_99 , __SCREAMING_SNAKE_CASE = 1E-7 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "AdamWeightDecay" , **__SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[Any] =weight_decay_rate
UpperCamelCase__ : Dict =include_in_weight_decay
UpperCamelCase__ : int =exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls , __SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] ={"WarmUp": WarmUp}
return super(__SCREAMING_SNAKE_CASE , cls).from_config(__SCREAMING_SNAKE_CASE , custom_objects=__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self)._prepare_local(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any =tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate")
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[str] =self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ : List[str] =list(zip(*__SCREAMING_SNAKE_CASE))
return super(__SCREAMING_SNAKE_CASE , self).apply_gradients(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) , name=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Tuple:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
UpperCamelCase__ : Optional[int] =apply_state or {}
UpperCamelCase__ : Optional[Any] =apply_state.get((var_device, var_dtype))
if coefficients is None:
UpperCamelCase__ : Any =self._fallback_apply_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : int =coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ : List[Any] =self._get_lr(var.device , var.dtype.base_dtype , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str =self._decay_weights_op(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
with tf.control_dependencies([decay]):
return super(__SCREAMING_SNAKE_CASE , self)._resource_apply_dense(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None) -> Dict:
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ : Any =self._get_lr(var.device , var.dtype.base_dtype , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[Any] =self._decay_weights_op(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
with tf.control_dependencies([decay]):
return super(__SCREAMING_SNAKE_CASE , self)._resource_apply_sparse(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Any =super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate})
return config
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) is not None:
return False
return True
class GradientAccumulator(object):
'''simple docstring'''
def __init__( self) -> int:
"""simple docstring"""
UpperCamelCase__ : str =[]
UpperCamelCase__ : List[str] =None
@property
def UpperCAmelCase ( self) -> List[str]:
"""simple docstring"""
if self._accum_steps is None:
UpperCamelCase__ : Any =tf.Variable(
tf.constant(0 , dtype=tf.intaa) , trainable=__SCREAMING_SNAKE_CASE , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients")
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , __SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
if not self._gradients:
UpperCamelCase__ : Any =self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__SCREAMING_SNAKE_CASE) , trainable=__SCREAMING_SNAKE_CASE , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
])
if len(__SCREAMING_SNAKE_CASE) != len(self._gradients):
raise ValueError(F'''Expected {len(self._gradients)} gradients, but got {len(__SCREAMING_SNAKE_CASE)}''')
for accum_gradient, gradient in zip(self._gradients , __SCREAMING_SNAKE_CASE):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__SCREAMING_SNAKE_CASE)
self._accum_steps.assign_add(1)
def UpperCAmelCase ( self) -> Tuple:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0)
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__SCREAMING_SNAKE_CASE))
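# Typical accumulation loop (sketch; assumes the class above is the usual
# GradientAccumulator, whose property and reset method names are obfuscated here):
#   accumulator = GradientAccumulator()
#   for step, batch in enumerate(dataset):
#       with tf.GradientTape() as tape:
#           loss = compute_loss(model, batch)              # hypothetical helper
#       accumulator(tape.gradient(loss, model.trainable_variables))
#       if (step + 1) % accumulation_steps == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()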
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reconstructed target: silence TensorFlow C++ logging (the original assignment target was obfuscated)
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
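# Why stepping 6k +/- 1 suffices: any integer is 6k + r with r in {0, ..., 5};
# r in {0, 2, 4} is even and r == 3 is divisible by 3, so every prime > 3 has
# r in {1, 5} -- exactly the candidates i and i + 2 checked above.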
class a_ ( unittest.TestCase ):
def _snake_case ( self : int ) ->Optional[int]:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
|
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key or uses the default key = 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt `content` with `key` (falls back to the constructor key, then 1)."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt `content` with `key`; XOR is self-inverse, so this mirrors encrypt."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
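# The whole scheme rests on XOR being self-inverse (illustration):
#   chr(ord("h") ^ 67)  -> "+"
#   chr(ord("+") ^ 67)  -> "h"   # applying the same key twice restores the input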
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental (emits a UserWarning on call).
    Name and warning category reconstructed; the obfuscated original only kept the message."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[str] = """ZinengTang/tvlt-base"""
UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
def _a ( self , **_A ):
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **_A )
def _a ( self , **_A ):
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_A )
def _a ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : str = self.get_image_processor()
UpperCamelCase : Any = self.get_feature_extractor()
UpperCamelCase : str = TvltProcessor(image_processor=_A , feature_extractor=_A )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase : List[Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _A )
self.assertIsInstance(processor.image_processor , _A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : int = self.get_image_processor()
UpperCamelCase : Optional[Any] = self.get_feature_extractor()
UpperCamelCase : Optional[int] = TvltProcessor(image_processor=_A , feature_extractor=_A )
UpperCamelCase : Tuple = np.ones([1_2_0_0_0] )
UpperCamelCase : Union[str, Any] = feature_extractor(_A , return_tensors="""np""" )
UpperCamelCase : List[str] = processor(audio=_A , return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.get_image_processor()
UpperCamelCase : Tuple = self.get_feature_extractor()
UpperCamelCase : Optional[Any] = TvltProcessor(image_processor=_A , feature_extractor=_A )
UpperCamelCase : List[Any] = np.ones([3, 2_2_4, 2_2_4] )
UpperCamelCase : Optional[Any] = image_processor(_A , return_tensors="""np""" )
UpperCamelCase : Any = processor(images=_A , return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.get_image_processor()
UpperCamelCase : Tuple = self.get_feature_extractor()
UpperCamelCase : Union[str, Any] = TvltProcessor(image_processor=_A , feature_extractor=_A )
UpperCamelCase : Tuple = np.ones([1_2_0_0_0] )
UpperCamelCase : Optional[int] = np.ones([3, 2_2_4, 2_2_4] )
UpperCamelCase : Optional[int] = processor(audio=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def _a ( self ):
'''simple docstring'''
UpperCamelCase : str = self.get_image_processor()
UpperCamelCase : Dict = self.get_feature_extractor()
UpperCamelCase : List[Any] = TvltProcessor(image_processor=_A , feature_extractor=_A )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """simple docstring"""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
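# Note: this callback re-runs evaluation on the *training* split whenever an eval pass
# fires, so train and validation metrics can be compared epoch by epoch; the deepcopy
# keeps the extra evaluate() call from mutating the trainer's control flow.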
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )

    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
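# A cleaned, runnable sketch of the callback pattern above: when the Trainer
# is about to evaluate, also report metrics on the training split so train and
# validation accuracy can be compared. Names here are illustrative, not the
# script's own.
from copy import deepcopy

from transformers import TrainerCallback


class TrainSetEvalCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(
                eval_dataset=self._trainer.train_dataset, metric_key_prefix="train"
            )
            return control_copy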
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _A ( unittest.TestCase ):
def _a (self ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = BlipImageProcessor()
UpperCamelCase__ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
UpperCamelCase__ = BlipaProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
def _a (self , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).tokenizer
def _a (self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).image_processor
def _a (self ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a (self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
UpperCamelCase__ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
UpperCamelCase__ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = '''lower newer'''
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a (self ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = '''lower newer'''
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def _a (self ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = '''lower newer'''
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
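# A hedged usage sketch of the save/load round trip the tests above exercise;
# the tiny GPT-2 checkpoint name is the one the tests themselves assume.
import tempfile

import numpy as np
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
processor = Blip2Processor(BlipImageProcessor(), tokenizer)

with tempfile.TemporaryDirectory() as tmpdir:
    processor.save_pretrained(tmpdir)
    reloaded = AutoProcessor.from_pretrained(tmpdir)

image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
inputs = reloaded(text="lower newer", images=image)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']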
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _A ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : "DiagonalGaussianDistribution"
class _A ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : List[str] =True
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = ("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE_ = ("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE_ = (64,) , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = "silu" , SCREAMING_SNAKE_CASE_ = 4 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 0.18215 , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
UpperCamelCase__ = Encoder(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , down_block_types=SCREAMING_SNAKE_CASE_ , block_out_channels=SCREAMING_SNAKE_CASE_ , layers_per_block=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , double_z=SCREAMING_SNAKE_CASE_ , )
# pass init params to Decoder
UpperCamelCase__ = Decoder(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , up_block_types=SCREAMING_SNAKE_CASE_ , block_out_channels=SCREAMING_SNAKE_CASE_ , layers_per_block=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCamelCase__ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase__ = False
UpperCamelCase__ = False
# only relevant if vae tiling is enabled
UpperCamelCase__ = self.config.sample_size
UpperCamelCase__ = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCamelCase__ = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCamelCase__ = 0.25
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Dict:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , (Encoder, Decoder) ):
UpperCamelCase__ = value
def _a (self , SCREAMING_SNAKE_CASE_ = True ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = use_tiling
def _a (self ) -> int:
'''simple docstring'''
self.enable_tiling(SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = True
def _a (self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _a (self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
UpperCamelCase__ = {}
def fn_recursive_add_processors(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if hasattr(SCREAMING_SNAKE_CASE_ , '''set_processor''' ):
UpperCamelCase__ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return processors
def _a (self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = len(self.attn_processors.keys() )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(SCREAMING_SNAKE_CASE_ )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if hasattr(SCREAMING_SNAKE_CASE_ , '''set_processor''' ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
module.set_processor(SCREAMING_SNAKE_CASE_ )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for name, module in self.named_children():
fn_recursive_attn_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Tuple:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> AutoencoderKLOutput:
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
if self.use_slicing and x.shape[0] > 1:
UpperCamelCase__ = [self.encoder(SCREAMING_SNAKE_CASE_ ) for x_slice in x.split(1 )]
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = self.encoder(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.quant_conv(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DiagonalGaussianDistribution(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.post_quant_conv(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.decoder(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
@apply_forward_hook
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
UpperCamelCase__ = [self._decode(SCREAMING_SNAKE_CASE_ ).sample for z_slice in z.split(1 )]
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = self._decode(SCREAMING_SNAKE_CASE_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = min(a.shape[2] , b.shape[2] , SCREAMING_SNAKE_CASE_ )
for y in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = min(a.shape[3] , b.shape[3] , SCREAMING_SNAKE_CASE_ )
for x in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> AutoencoderKLOutput:
'''simple docstring'''
UpperCamelCase__ = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCamelCase__ = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCamelCase__ = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCamelCase__ = []
for i in range(0 , x.shape[2] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for j in range(0 , x.shape[3] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCamelCase__ = self.encoder(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.quant_conv(SCREAMING_SNAKE_CASE_ )
row.append(SCREAMING_SNAKE_CASE_ )
rows.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
for i, row in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for j, tile in enumerate(SCREAMING_SNAKE_CASE_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase__ = self.blend_v(rows[i - 1][j] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if j > 0:
UpperCamelCase__ = self.blend_h(row[j - 1] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(SCREAMING_SNAKE_CASE_ , dim=3 ) )
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=2 )
UpperCamelCase__ = DiagonalGaussianDistribution(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCamelCase__ = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCamelCase__ = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCamelCase__ = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCamelCase__ = []
for i in range(0 , z.shape[2] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for j in range(0 , z.shape[3] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCamelCase__ = self.post_quant_conv(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.decoder(SCREAMING_SNAKE_CASE_ )
row.append(SCREAMING_SNAKE_CASE_ )
rows.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
for i, row in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for j, tile in enumerate(SCREAMING_SNAKE_CASE_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase__ = self.blend_v(rows[i - 1][j] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if j > 0:
UpperCamelCase__ = self.blend_h(row[j - 1] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(SCREAMING_SNAKE_CASE_ , dim=3 ) )
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCamelCase__ = sample
UpperCamelCase__ = self.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
if sample_posterior:
UpperCamelCase__ = posterior.sample(generator=SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = posterior.mode()
UpperCamelCase__ = self.decode(SCREAMING_SNAKE_CASE_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
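# Standalone restatement of the tile-blending math above: linearly cross-fade
# the overlapping rows of two vertically adjacent NCHW tiles. Pure torch, for
# illustration only.
import torch


def blend_v(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        w = y / blend_extent
        # Row y of the lower tile fades in as the bottom rows of the upper tile fade out.
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - w) + b[:, :, y, :] * w
    return b


top, bottom = torch.zeros(1, 3, 8, 8), torch.ones(1, 3, 8, 8)
print(blend_v(top, bottom, 4)[0, 0, :4, 0])  # ramps 0.00, 0.25, 0.50, 0.75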
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowerCAmelCase : List[Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_lowerCAmelCase : Tuple = concatenate_datasets
_lowerCAmelCase : Optional[int] = DownloadConfig
_lowerCAmelCase : List[Any] = DownloadManager
_lowerCAmelCase : int = DownloadMode
_lowerCAmelCase : Any = DownloadConfig
_lowerCAmelCase : str = DownloadMode
_lowerCAmelCase : str = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
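# A minimal sketch of the import-time version guard pattern used above; the
# threshold mirrors the module's own check.
import platform

from packaging import version

if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning("Python>=3.7 is required to use this package.")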
|
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_lowerCAmelCase : Optional[int] = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
_lowerCAmelCase : Optional[Any] = {
"""169M""": 768,
"""430M""": 1_024,
"""1B5""": 2_048,
"""3B""": 2_560,
"""7B""": 4_096,
"""14B""": 5_120,
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : str = list(state_dict.keys() )
for name in state_dict_keys:
UpperCAmelCase__ : Dict = state_dict.pop(snake_case )
# emb -> embedding
if name.startswith("emb." ):
UpperCAmelCase__ : str = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
UpperCAmelCase__ : List[str] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
UpperCAmelCase__ : Optional[int] = re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , snake_case )
# ffn -> feed_forward
UpperCAmelCase__ : Any = re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , snake_case )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
UpperCAmelCase__ : List[Any] = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
UpperCAmelCase__ : int = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
UpperCAmelCase__ : Optional[Any] = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
UpperCAmelCase__ : int = "rwkv." + name
UpperCAmelCase__ : Dict = weight
return state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Dict=None , snake_case : List[Any]=None , snake_case : List[str]=False , snake_case : str=None )-> int:
'''simple docstring'''
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
UpperCAmelCase__ : str = 5_0277
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
UpperCAmelCase__ : Union[str, Any] = PreTrainedTokenizerFast(tokenizer_file=snake_case )
UpperCAmelCase__ : Tuple = len(snake_case )
tokenizer.save_pretrained(snake_case )
# 2. Build the config
UpperCAmelCase__ : Dict = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
UpperCAmelCase__ : List[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.' )
UpperCAmelCase__ : int = RwkvConfig(
vocab_size=snake_case , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case )
# 3. Download model file then convert state_dict
UpperCAmelCase__ : Optional[int] = hf_hub_download(snake_case , snake_case )
UpperCAmelCase__ : Optional[Any] = torch.load(snake_case , map_location="cpu" )
UpperCAmelCase__ : Tuple = convert_state_dict(snake_case )
# 4. Split in shards and save
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = shard_checkpoint(snake_case )
for shard_file, shard in shards.items():
torch.save(snake_case , os.path.join(snake_case , snake_case ) )
if index is not None:
UpperCAmelCase__ : Tuple = os.path.join(snake_case , snake_case )
# Save the index as well
with open(snake_case , "w" , encoding="utf-8" ) as f:
UpperCAmelCase__ : Any = json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + "\n"
f.write(snake_case )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model." )
UpperCAmelCase__ : List[str] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
UpperCAmelCase__ : Any = torch.load(os.path.join(snake_case , snake_case ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case , snake_case ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
UpperCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(snake_case )
model.push_to_hub(snake_case , max_shard_size="2GB" )
tokenizer.push_to_hub(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
_lowerCAmelCase : Dict = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
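# The key-renaming step above in isolation: regex-rewrite checkpoint keys
# block by block. The mapping shown is a subset, for illustration only.
import re

state_dict = {"emb.weight": 0, "blocks.3.att.key.weight": 1, "blocks.3.ffn.value.weight": 2}
renamed = {}
for name, weight in state_dict.items():
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    renamed["rwkv." + name] = weight  # everything except head.weight gets the prefix

print(sorted(renamed))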
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase ( snake_case ) -> Dict:
'''simple docstring'''
__A = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __UpperCamelCase ( snake_case , snake_case ) -> List[str]:
'''simple docstring'''
__A = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __UpperCamelCase ( snake_case ) -> Any:
'''simple docstring'''
__A = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def __UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
__A = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCamelCase ( snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
'''simple docstring'''
__A = '''imagenet-1k-id2label.json'''
__A = 1_0_0_0
__A = '''huggingface/label-files'''
__A = num_labels
__A = json.load(open(cached_download(hf_hub_url(snake_case , snake_case , repo_type='''dataset''' ) ) , '''r''' ) )
__A = {int(snake_case ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
__A = __A = CvtConfig(num_labels=snake_case , idalabel=snake_case , labelaid=snake_case )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__A = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__A = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__A = [2, 2, 2_0]
__A = [3, 1_2, 1_6]
__A = [1_9_2, 7_6_8, 1_0_2_4]
__A = CvtForImageClassification(snake_case )
__A = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__A = image_size
__A = torch.load(snake_case , map_location=torch.device('''cpu''' ) )
__A = OrderedDict()
__A = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__A = list_of_state_dict + cls_token(snake_case )
__A = list_of_state_dict + embeddings(snake_case )
for cnt in range(config.depth[idx] ):
__A = list_of_state_dict + attention(snake_case , snake_case )
__A = list_of_state_dict + final()
for gg in list_of_state_dict:
print(snake_case )
for i in range(len(snake_case ) ):
__A = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(snake_case )
model.save_pretrained(snake_case )
image_processor.save_pretrained(snake_case )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_8_4,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCamelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
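# Hedged sketch of the label-mapping setup used above: fetch the ImageNet-1k
# id2label file from the Hub and build both directions of the mapping.
import json

from huggingface_hub import hf_hub_download

path = hf_hub_download("huggingface/label-files", "imagenet-1k-id2label.json", repo_type="dataset")
id2label = {int(k): v for k, v in json.load(open(path)).items()}
label2id = {v: k for k, v in id2label.items()}
print(id2label[0])  # 'tench, Tinca tinca'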
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __UpperCamelCase ( snake_case ) -> Any:
'''simple docstring'''
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def __UpperCamelCase ( snake_case ) -> List[str]:
'''simple docstring'''
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , UpperCAmelCase )-> List[str]:
__A = metric_id
class _lowerCAmelCase:
"""simple docstring"""
lowerCamelCase__ = [MetricMock(_a) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def SCREAMING_SNAKE_CASE__ ( self )-> Dict:
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def __UpperCamelCase ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> str:
'''simple docstring'''
if "tmp_path" in args:
__A = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(snake_case , match='''https://huggingface.co/docs/evaluate''' ):
func(*snake_case )
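# Minimal illustration of the pytest.warns pattern the test above relies on:
# `match` is applied to the warning message with re.search.
import warnings

import pytest


def deprecated_call():
    warnings.warn("metrics moved, see https://huggingface.co/docs/evaluate", FutureWarning)


with pytest.warns(FutureWarning, match="evaluate"):
    deprecated_call()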
|
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def UpperCamelCase__ ( __magic_name__ : Tuple ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = tf.convert_to_tensor(__lowercase )
snake_case__ : Any = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[str]:
'''simple docstring'''
snake_case__ : int = tf.convert_to_tensor(__lowercase )
snake_case__ : int = tf.cast(math.pi , x.dtype )
snake_case__ : Optional[int] = tf.cast(0.04_4715 , x.dtype )
snake_case__ : Dict = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__lowercase , 3 )) ))
return x * cdf
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case__ : Union[str, Any] = tf.convert_to_tensor(__lowercase )
return x * tf.tanh(tf.math.softplus(__lowercase ) )
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case__ : int = tf.convert_to_tensor(__lowercase )
snake_case__ : int = tf.cast(0.04_4715 , x.dtype )
snake_case__ : str = tf.cast(0.79_7884_5608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def UpperCamelCase__ ( __magic_name__ : str ) -> Any:
'''simple docstring'''
snake_case__ : Union[str, Any] = tf.convert_to_tensor(__lowercase )
snake_case__ : Any = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return tf.clip_by_value(_gelu(__lowercase ) , -10 , 10 )
def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : List[Any]=-1 ) -> str:
'''simple docstring'''
snake_case__ , snake_case__ : Dict = tf.split(__lowercase , 2 , axis=__lowercase )
return a * tf.math.sigmoid(__lowercase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def UpperCamelCase__ ( __magic_name__ : str ) -> Any:
'''simple docstring'''
return tf.keras.activations.gelu(__lowercase , approximate=__lowercase )
A_ : str = tf.keras.activations.gelu
A_ : str = approximate_gelu_wrap
else:
A_ : Tuple = _gelu
A_ : List[Any] = _gelu_new
A_ : Any = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
|
def _a ( __lowercase = 1 , __lowercase = 1000 ) -> int:
"""simple docstring"""
__UpperCamelCase = 1
__UpperCamelCase = 0
for divide_by_number in range(__lowercase , digit + 1 ):
__UpperCamelCase = []
__UpperCamelCase = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__lowercase ):
__UpperCamelCase = len(__lowercase )
__UpperCamelCase = divide_by_number
else:
has_been_divided.append(__lowercase )
__UpperCamelCase = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
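# Self-contained restatement of the cycle search above: simulate long division
# and record remainders; the first repeated remainder closes the cycle.
# (Terminating fractions repeat a remainder of 0, same as in the script.)
def longest_cycle_denominator(numerator: int = 1, digit: int = 1000) -> int:
    best_length, best_denominator = 0, 1
    for denominator in range(numerator, digit + 1):
        seen, remainder = [], numerator
        for _ in range(digit):
            if remainder in seen:
                if best_length < len(seen):
                    best_length, best_denominator = len(seen), denominator
                break
            seen.append(remainder)
            remainder = remainder * 10 % denominator
    return best_denominator


print(longest_cycle_denominator(1, 10))  # 7: 1/7 repeats with period 6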
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
a = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
a = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
a = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
a = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
a = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
a = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
a = tf.keras.preprocessing.image.img_to_array(test_image)
a = np.expand_dims(test_image, axis=0)
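# The generators above rescale pixels by 1.0/255; the same scaling is assumed
# here so predict() sees inputs matching the training distribution.
a = test_image / 255.0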
a = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] < 0.5:
a = '''Normal'''
else:
a = '''Abnormality detected'''
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[str] ):
_A = inspect.getfile(accelerate.test_utils )
_A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_A = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def lowerCAmelCase_ ( self : List[Any] ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
_A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase_ ( self : Optional[Any] ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
_A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase_ ( self : List[str] ):
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
_A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
a = Accelerator()
a = (accelerator.state.process_index + 2, 10)
a = torch.randint(0, 10, shape).to(accelerator.device)
a = ''''''
a = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
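# Single-process illustration of the utility the script above stress-tests:
# pad_across_processes zero-pads ragged per-rank tensors to a common shape.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
tensor = torch.arange(4).to(accelerator.device)
padded = accelerator.pad_across_processes(tensor)
print(padded.shape)  # unchanged with 1 process; padded to the max across ranks otherwise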
|
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( _snake_case : float ,_snake_case : int ):
'''simple docstring'''
lowercase__ = u
for i in range(1 ,_snake_case ):
lowercase__ = temp * (u - i)
return temp
def lowerCamelCase ( ):
'''simple docstring'''
lowercase__ = int(input("enter the number of values: " ) )
lowercase__ = []
for _ in range(_snake_case ):
y.append([] )
for i in range(_snake_case ):
for j in range(_snake_case ):
y[i].append(_snake_case )
lowercase__ = 0
print("enter the values of parameters in a list: " )
lowercase__ = list(map(_snake_case ,input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(_snake_case ):
lowercase__ = float(input() )
lowercase__ = int(input("enter the value to interpolate: " ) )
lowercase__ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 ,_snake_case ):
for j in range(n - i ):
lowercase__ = y[j + 1][i - 1] - y[j][i - 1]
lowercase__ = y[0][0]
for i in range(1 ,_snake_case ):
summ += (ucal(_snake_case ,_snake_case ) * y[0][i]) / math.factorial(_snake_case )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
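# Worked numeric sketch of the forward-difference formula the script applies:
# with nodes x = [0, 1, 2], y = [1, 2, 4] and u = (x* - x0) / h,
#   f(x*) ~= y0 + u*Dy0 + u*(u - 1)/2! * D2y0
import math

x, y = [0.0, 1.0, 2.0], [1.0, 2.0, 4.0]
u = (0.5 - x[0]) / (x[1] - x[0])
d1 = y[1] - y[0]
d2 = (y[2] - y[1]) - (y[1] - y[0])
print(y[0] + u * d1 + u * (u - 1) / math.factorial(2) * d2)  # 1.375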
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case (UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :str = MgpstrTokenizer
lowerCAmelCase__ :Tuple = False
lowerCAmelCase__ :Any = {}
lowerCAmelCase__ :List[Any] = False
def _a ( self ) -> Union[str, Any]:
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
lowercase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + "\n" )
def _a ( self ,**UpperCAmelCase_ ) -> Any:
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def _a ( self ,UpperCAmelCase_ ) -> Dict:
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
lowercase__ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token] ,add_special_tokens=UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) ,1 )
lowercase__ = tokenizer.decode(UpperCAmelCase_ ,skip_special_tokens=UpperCAmelCase_ )
self.assertTrue(special_token not in decoded )
def _a ( self ) -> str:
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(UpperCAmelCase_ )
lowercase__ = tokenizer.tokenize(UpperCAmelCase_ )
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
lowercase__ = tokenizer.encode(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertNotEqual(len(UpperCAmelCase_ ) ,0 )
lowercase__ = tokenizer.decode(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(text_a.replace(" " ,"" ) ,UpperCAmelCase_ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _a ( self ) -> Optional[int]:
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _a ( self ) -> Any:
pass
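# Sketch of the character-level round trip the tests above check, with the
# same toy vocabulary; note the tokenizer decodes with spaces between
# characters, which the tests strip.
import json
import os
import tempfile

from transformers import MgpstrTokenizer

vocab = {c: i for i, c in enumerate(["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz"))}
with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.json")
    with open(vocab_file, "w") as fp:
        json.dump(vocab, fp)
    tokenizer = MgpstrTokenizer(vocab_file=vocab_file)
    ids = tokenizer("tester")["input_ids"]
    print(tokenizer.decode(ids).replace(" ", ""))  # tester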
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = ['''image_processor''', '''tokenizer''']
lowerCAmelCase__ = '''BlipImageProcessor'''
lowerCAmelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
snake_case__ : List[str] =False
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] =self.image_processor
def __call__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
snake_case__ : Tuple =self.tokenizer
snake_case__ : Optional[int] =self.tokenizer(
text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
return text_encoding
# add pixel_values
snake_case__ : Union[str, Any] =self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
if text is not None:
snake_case__ : Union[str, Any] =self.tokenizer(
text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
snake_case__ : Optional[Any] =None
if text_encoding is not None:
encoding_image_processor.update(__SCREAMING_SNAKE_CASE )
return encoding_image_processor
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int =self.tokenizer.model_input_names
snake_case__ : Union[str, Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
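# The model_input_names property above merges tokenizer and image-processor
# names while preserving order; the dedup idiom in isolation:
tokenizer_names = ["input_ids", "attention_mask"]
image_processor_names = ["pixel_values"]
print(list(dict.fromkeys(tokenizer_names + image_processor_names)))
# ['input_ids', 'attention_mask', 'pixel_values']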
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : int =torch.nn.Linear(10 , 10 )
snake_case__ : int =torch.optim.SGD(model.parameters() , 0.1 )
snake_case__ : str =Accelerator()
snake_case__ : Any =accelerator.prepare(__SCREAMING_SNAKE_CASE )
try:
pickle.loads(pickle.dumps(__SCREAMING_SNAKE_CASE ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
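# The round trip the test above asserts, restated in isolation: an optimizer
# wrapped by Accelerator.prepare must survive pickling.
import pickle

import torch
from accelerate import Accelerator

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
optimizer = Accelerator().prepare(optimizer)
restored = pickle.loads(pickle.dumps(optimizer))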
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"=> File names {file_names}")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
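# A minimal usage sketch (assumption, not part of the original script): convert one
# canonical BERT checkpoint without re-downloading it. The output path is illustrative.
# convert_slow_checkpoint_to_fast("BertTokenizer", "bert-base-uncased", "/tmp/fast_tokenizers", False)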
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(f"""{solution() = }""")
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f"part_id = {partition_id}").drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1
    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        """simple docstring"""
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
        """simple docstring"""
        yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """simple docstring"""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """simple docstring"""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    @property
    def n_shards(self) -> int:
        """simple docstring"""
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        """simple docstring"""
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        """simple docstring"""
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, 'a')
            return [probe_file]
        if self._spark.conf.get('spark.master', '').startswith('local'):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir')
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        """simple docstring"""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        """simple docstring"""
        import pyspark
        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]})
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, 'batch_bytes: long')
            .agg(pyspark.sql.functions.sum('batch_bytes').alias('sample_bytes'))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        """simple docstring"""
        import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'])
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace('SSSSS', f"{shard_id:05d}").replace('TTTTT', f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files)
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'])
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace('SSSSS', f"{shard_id:05d}").replace('TTTTT', f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files)
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'])
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)
        stats = (
            self.df.mapInArrow(write_arrow, 'task_id: long, num_examples: long, num_bytes: long')
            .groupBy('task_id')
            .agg(
                pyspark.sql.functions.sum('num_examples').alias('total_num_examples'), pyspark.sql.functions.sum('num_bytes').alias('total_num_bytes'), pyspark.sql.functions.count('num_bytes').alias('num_shards'), pyspark.sql.functions.collect_list('num_examples').alias('shard_lengths'))
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs):
        """simple docstring"""
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs, fpath.replace('SSSSS', f"{shard_id:05d}").replace('TTTTT', f"{task_id:05d}"), fpath.replace('TTTTT-SSSSS', f"{global_shard_id:05d}").replace('NNNNN', f"{total_shards:05d}"))
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda a: _rename_shard(*a)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS', f"{shard_id:05d}").replace('TTTTT', f"{task_id:05d}"), fpath.replace(SUFFIX, ''))
    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        """simple docstring"""
        return SparkExamplesIterable(self.df)
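# A minimal usage sketch (assumption, not part of the original module): this builder
# backs `datasets.Dataset.from_spark`; `df` here is a hypothetical pyspark DataFrame
# and the cache path is illustrative.
# import datasets
# ds = datasets.Dataset.from_spark(df, cache_dir="/nfs/shared_cache")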
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--repo_path',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    config_parameters_to_change = {
        'image_size': 'sample_size',
        'num_res_blocks': 'layers_per_block',
        'block_channels': 'block_out_channels',
        'down_blocks': 'down_block_types',
        'up_blocks': 'up_block_types',
        'downscale_freq_shift': 'freq_shift',
        'resnet_num_groups': 'norm_num_groups',
        'resnet_act_fn': 'act_fn',
        'resnet_eps': 'norm_eps',
        'num_head_channels': 'attention_head_dim',
    }
    key_parameters_to_change = {
        'time_steps': 'time_proj',
        'mid': 'mid_block',
        'downsample_blocks': 'down_blocks',
        'upsample_blocks': 'up_blocks',
    }
    subfolder = '' if has_file(args.repo_path, 'config.json') else 'unet'
    with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, 'config.json'):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNet2DModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config['down_block_types'] = [k.replace('UNetRes', '') for k in config['down_block_types']]
        config['up_block_types'] = [k.replace('UNetRes', '') for k in config['up_block_types']]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('.')[0] == key:
                    new_state_dict['.'.join([new_key] + param_key.split('.')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        '''simple docstring'''
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)
    def test_exact_match_arg(self):
        '''simple docstring'''
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
    def test_exact_match_arg_remote(self):
        '''simple docstring'''
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
    def test_exact_match_kwarg(self):
        '''simple docstring'''
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
    def test_exact_match_kwarg_remote(self):
        '''simple docstring'''
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "canine"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
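# A minimal usage sketch (assumption, not part of the original module): instantiate a
# default config and read back one of the character-level hyperparameters; the asserted
# value mirrors the default defined above.
# config = CanineConfig()
# assert config.num_hash_buckets == 16384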
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted") -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.')
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.')
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.')
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(dsets: List[DatasetType], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0) -> DatasetType:
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.')
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.')
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
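# A minimal usage sketch (assumption, not part of the original module): interleave two
# small in-memory map-style datasets; the column name and values are illustrative.
# from datasets import Dataset
# d1 = Dataset.from_dict({"text": ["a", "b"]})
# d2 = Dataset.from_dict({"text": ["c", "d"]})
# mixed = interleave_datasets([d1, d2], stopping_strategy="all_exhausted")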
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
        # Initialize round constants
        self.round_constants = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                a, b, c, d, e, f, g, h = (
                    ((temp1 + temp2) % 0x1_0000_0000),
                    a,
                    b,
                    c,
                    ((d + temp1) % 0x1_0000_0000),
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib
        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string")
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    """simple docstring"""
    def analyze_directory(self, directory: Path, identifier: Union[str, None] = None, n_identifier: Union[List[str], None] = None, ignore_files: Union[str, List[str], None] = None, only_modules: bool = True):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.')
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_examples(self):
        module_dir = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(module_dir, identifier=identifier, ignore_files=ignore_files)
    def test_tokenization_examples(self):
        module_dir = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(module_dir, identifier=identifier)
    def test_configuration_examples(self):
        module_dir = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(module_dir, identifier=identifier)
    def test_remaining_examples(self):
        module_dir = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(module_dir, n_identifier=n_identifiers)
    def test_doc_examples(self):
        doc_source_dir = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_dir, ignore_files=ignore_files, only_modules=False)
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """simple docstring"""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    '''simple docstring'''
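# A minimal usage sketch (assumption, not part of the original file): build a small
# weighted graph and run Boruvka's algorithm; node count, edges, and weights are illustrative.
# g = Graph(4)
# g.add_edge(0, 1, 1)
# g.add_edge(1, 2, 2)
# g.add_edge(2, 3, 3)
# g.boruvka()  # prints each added edge and the total MST weight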
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def get_artifacts_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': ') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                test = line[len('FAILED ') :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            ' problem.')
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    '''simple docstring'''
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    '''simple docstring'''
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        model = test.split('/')[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    '''simple docstring'''
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 553
| 1
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
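# Usage sketch (illustrative, not part of the original module): given only an
# `aatype` tensor, `make_atom14_masks` fills in the atom14/atom37 index maps
# and existence masks in place.
#
#   protein = {"aatype": torch.zeros(8, dtype=torch.long)}     # 8 residues of restype 0
#   protein = make_atom14_masks(protein)
#   protein["residx_atom14_to_atom37"].shape                   # -> torch.Size([8, 14])
#   protein["atom37_atom_exists"].shape                        # -> torch.Size([8, 37])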
| 720
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node from the list; returns None if the node is not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of data."""

    # class variable to map the decorated functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Returns the value for the input key and updates the Double Linked List."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for the input key and updates the Double Linked List."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRU Cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
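# Usage sketch for the decorator interface (illustrative; `fib` is a
# hypothetical memoized function, not part of the module above):
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       if num in (1, 2):
#           return 1
#       return fib(num - 1) + fib(num - 2)
#
#   fib(100)            # cached intermediate results make this linear-time
#   fib.cache_info()    # -> CacheInfo(hits=..., misses=..., capacity=100, current size=...)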
| 398
| 0
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Also evaluates on the train set (with metric_key_prefix="train") at each evaluation step."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
| 180
|
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """Determine whether the given string is a valid Indian phone number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 421
| 0
|
"""simple docstring"""
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A number is a Krishnamurthy number if the sum of the factorials of its digits equals the number."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
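# Worked example: 145 is a Krishnamurthy (strong) number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, so krishnamurthy(145) -> True.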
| 468
|
"""simple docstring"""
def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
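# Sanity check (sketch): the primes below 10 are 2, 3, 5 and 7, so
# solution(10) -> 17.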
| 468
| 1
|
"""simple docstring"""
def aliquot_sum(input_num: int) -> int:
    """
    Return the aliquot sum of `input_num`: the sum of all of its proper
    divisors (every divisor except the number itself).
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
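# Worked example: the proper divisors of 6 are 1, 2 and 3, so
# aliquot_sum(6) -> 6 (6 is a perfect number); aliquot_sum(12) -> 16.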
| 580
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
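# Typical invocations (sketch, from the repo root):
#
#   python utils/check_copies.py                       # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite   # rewrite the out-of-date copies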
| 580
| 1
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
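# Usage sketch of the API exercised by these tests (assumes `transformers` is installed):
#
#   from transformers import GenerationConfig
#   config = GenerationConfig(do_sample=True, temperature=0.7)
#   unused = config.update(max_new_tokens=256, foo="bar")   # -> {"foo": "bar"}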
| 226
|
def sum_of_digits(n: int) -> int:
    """Find the sum of the digits of a number, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of the digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of the digits of a number via its string representation."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark multiple functions, with three different length int values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
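# Quick check (sketch): all three implementations agree, e.g.
# sum_of_digits(-123) == sum_of_digits_recursion(-123) == sum_of_digits_compact(-123) == 6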
| 226
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 8
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and the relevant
    `empty_cache()`. Returned objects should be reassigned to the same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks whether `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    A decorator that retries `function` with ever-halving batch sizes until the code executes
    without running out of memory.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
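# Usage sketch (the training body is a hypothetical stand-in): the decorator
# injects `batch_size` as the first argument and halves it on every OOM.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size, model, dataloader):
#       ...  # run training at `batch_size`
#
#   train(model, dataloader)   # note: the caller does NOT pass `batch_size`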
| 673
| 0
|
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements tanh via the logistic identity tanh(x) = 2*sigmoid(2x) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
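# Worked example (sketch): tangent_hyperbolic(np.array([0.0])) -> array([0.]),
# and large inputs saturate towards +/-1, e.g. tangent_hyperbolic(np.array([5.0])) is ~0.9999.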
| 312
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"

        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 312
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    """Configuration class to store the configuration of a REALM model."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
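# Usage sketch (illustrative):
#
#   template = LanguageModeling(text_column="content")
#   template.column_mapping   # -> {"content": "text"}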
| 321
|
def aliquot_sum(input_num: int) -> int:
    """Return the sum of all proper divisors of `input_num` (every divisor except the number itself)."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Calculate whichever one of voltage, current, or power is missing;
    exactly one of the three arguments must be 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
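# Hedged example (argument values are illustrative): with voltage passed as 0,
# the function solves V = P / I.
#
#     >>> electric_power(voltage=0, current=2, power=5)
#     result(name='voltage', value=2.5)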
"""Tokenization classes for GPT-NeoX Japanese."""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
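# Hedged note on the vocab format (inferred from the parser above, not from
# documentation): each line of vocab.txt is either a single token or a
# comma-separated list of surface forms that all map to one shared token id.
# A hypothetical line "こんにちは,こんにちわ" would give both spellings the same id.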
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for one word in multiple (variant) forms, so use raw_vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
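# --- Hedged usage sketch (model id taken from PRETRAINED_VOCAB_FILES_MAP above;
# outputs are illustrative, not verified) ---
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界")["input_ids"]
#     text = tokenizer.decode(ids)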
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
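# --- Hedged usage sketch (mirrors the iterator pattern tested above; `model`,
# `tokenizer`, and `input_ids` are assumed to exist) ---
#
#     streamer = TextIteratorStreamer(tokenizer)
#     Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
#     for chunk in streamer:
#         print(chunk, end="", flush=True)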
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak ProphetNet's weights into our ProphetNet structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
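# Hedged invocation example (script name and paths are placeholders, not from
# the original):
#
#     python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#         --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#         --pytorch_dump_folder_path ./prophetnet-converted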
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
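# --- Hedged usage sketch (assumes apache_beam is installed; mirrors the tests
# above, cache path is a placeholder) ---
#
#     builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#     builder.download_and_prepare()  # DirectRunner executes the pipeline in-process
#     dset = builder.as_dataset()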
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
import inspect
import unittest

from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
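# Hedged invocation example (script name and paths are placeholders):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path ./tapas-converted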
"""simple docstring"""
import numpy
class lowerCAmelCase :
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> None:
'''simple docstring'''
lowerCamelCase_ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ = numpy.zeros(output_array.shape )
def _lowerCAmelCase ( self ) -> numpy.ndarray:
'''simple docstring'''
lowerCamelCase_ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _lowerCAmelCase ( self ) -> None:
'''simple docstring'''
lowerCamelCase_ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
lowerCamelCase_ = input_arr
lowerCamelCase_ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCamelCase_ ( _lowerCamelCase : numpy.ndarray ):
return 1 / (1 + numpy.exp(-value ))
def lowerCamelCase_ ( _lowerCamelCase : numpy.ndarray ):
return (value) * (1 - (value))
def lowerCamelCase_ ( ):
lowerCamelCase_ = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=1_0 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
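# Hedged note: sigmoid_derivative() above relies on the identity
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)). It is always applied to
# activations that have already passed through sigmoid(), which is why it
# computes value * (1 - value) instead of re-evaluating the sigmoid.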
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :torch.FloatTensor
class lowerCAmelCase ( a , a ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCamelCase__ = 3 , UpperCamelCase__ = 3 , UpperCamelCase__ = ("DownEncoderBlock2D",) , UpperCamelCase__ = ("UpDecoderBlock2D",) , UpperCamelCase__ = (64,) , UpperCamelCase__ = 1 , UpperCamelCase__ = "silu" , UpperCamelCase__ = 3 , UpperCamelCase__ = 32 , UpperCamelCase__ = 256 , UpperCamelCase__ = 32 , UpperCamelCase__ = None , UpperCamelCase__ = 0.18_215 , UpperCamelCase__ = "group" , ) -> Any:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
lowerCamelCase_ = Encoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , down_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , act_fn=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , double_z=UpperCamelCase__ , )
lowerCamelCase_ = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase_ = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
lowerCamelCase_ = VectorQuantizer(UpperCamelCase__ , UpperCamelCase__ , beta=0.25 , remap=UpperCamelCase__ , sane_index_shape=UpperCamelCase__ )
lowerCamelCase_ = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
# pass init params to Decoder
lowerCamelCase_ = Decoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , up_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , act_fn=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , norm_type=UpperCamelCase__ , )
@apply_forward_hook
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = True ) -> VQEncoderOutput:
'''simple docstring'''
lowerCamelCase_ = self.encoder(UpperCamelCase__ )
lowerCamelCase_ = self.quant_conv(UpperCamelCase__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=UpperCamelCase__ )
@apply_forward_hook
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if not force_not_quantize:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.quantize(UpperCamelCase__ )
else:
lowerCamelCase_ = h
lowerCamelCase_ = self.post_quant_conv(UpperCamelCase__ )
lowerCamelCase_ = self.decoder(UpperCamelCase__ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
lowerCamelCase_ = sample
lowerCamelCase_ = self.encode(UpperCamelCase__ ).latents
lowerCamelCase_ = self.decode(UpperCamelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
| 142
| 1
|
"""OpenAI GPT model fine-tuning script on the ROCStories dataset."""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    Each example is encoded twice (once per continuation) as:
    [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def __lowerCAmelCase ( ) -> str:
lowerCAmelCase__ : str = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=UpperCamelCase , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=UpperCamelCase , default='''''' )
parser.add_argument('''--eval_dataset''' , type=UpperCamelCase , default='''''' )
parser.add_argument('''--seed''' , type=UpperCamelCase , default=42 )
parser.add_argument('''--num_train_epochs''' , type=UpperCamelCase , default=3 )
parser.add_argument('''--train_batch_size''' , type=UpperCamelCase , default=8 )
parser.add_argument('''--eval_batch_size''' , type=UpperCamelCase , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=UpperCamelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=UpperCamelCase , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=UpperCamelCase , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=UpperCamelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=UpperCamelCase , default=6.25E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=UpperCamelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=UpperCamelCase , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=UpperCamelCase , default=0.01 )
parser.add_argument('''--lm_coef''' , type=UpperCamelCase , default=0.9 )
parser.add_argument('''--n_valid''' , type=UpperCamelCase , default=374 )
parser.add_argument('''--server_ip''' , type=UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
lowerCAmelCase__ : Tuple = parser.parse_args()
print(UpperCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowerCAmelCase__ : List[str] = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(UpperCamelCase , UpperCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading function also adds new tokens and embeddings, called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
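# (adding tokens grows the vocabulary, so the embedding matrix is resized below to give the new ids rows)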
lowerCAmelCase__ : Any = ['''_start_''', '''_delimiter_''', '''_classify_''']
lowerCAmelCase__ : Optional[Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCamelCase ) )
model.to(UpperCamelCase )
# Load and encode the datasets
def tokenize_and_encode(UpperCamelCase ):
if isinstance(UpperCamelCase , UpperCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCamelCase ) )
elif isinstance(UpperCamelCase , UpperCamelCase ):
return obj
return [tokenize_and_encode(o ) for o in obj]
logger.info('''Encoding dataset...''' )
lowerCAmelCase__ : Any = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : Any = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Optional[int] = (train_dataset, eval_dataset)
lowerCAmelCase__ : Union[str, Any] = tokenize_and_encode(UpperCamelCase )
# Compute the max input length for the Transformer
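# Each choice is laid out as [start] story [delimiter] continuation [classify], hence the +3 below;
# story and continuation are each capped at roughly half the context so the total always fits.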
lowerCAmelCase__ : List[str] = model.config.n_positions // 2 - 2
lowerCAmelCase__ : Dict = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, contb, _ in dataset )
lowerCAmelCase__ : Optional[Any] = min(UpperCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ : Optional[Any] = pre_process_datasets(UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ : Tuple = TensorDataset(*UpperCamelCase )
lowerCAmelCase__ : Optional[int] = RandomSampler(UpperCamelCase )
lowerCAmelCase__ : int = DataLoader(UpperCamelCase , sampler=UpperCamelCase , batch_size=args.train_batch_size )
lowerCAmelCase__ : Optional[int] = TensorDataset(*UpperCamelCase )
lowerCAmelCase__ : Dict = SequentialSampler(UpperCamelCase )
lowerCAmelCase__ : Any = DataLoader(UpperCamelCase , sampler=UpperCamelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ : Dict = args.max_steps
lowerCAmelCase__ : Optional[int] = args.max_steps // (len(UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ : Dict = len(UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ : Tuple = list(model.named_parameters() )
lowerCAmelCase__ : List[str] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
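# biases and LayerNorm parameters are conventionally excluded from weight decay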
lowerCAmelCase__ : Optional[Any] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
lowerCAmelCase__ : Optional[int] = AdamW(UpperCamelCase , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ : str = get_linear_schedule_with_warmup(
UpperCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCamelCase )
if args.do_train:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : Any = tqdm(UpperCamelCase , desc='''Training''' )
for step, batch in enumerate(UpperCamelCase ):
lowerCAmelCase__ : Union[str, Any] = tuple(t.to(UpperCamelCase ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = batch
lowerCAmelCase__ : Optional[Any] = model(UpperCamelCase , mc_token_ids=UpperCamelCase , lm_labels=UpperCamelCase , mc_labels=UpperCamelCase )
lowerCAmelCase__ : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
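# running exponential moving average of the loss (decay 0.7), used for a smoother progress-bar readout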
lowerCAmelCase__ : List[str] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase__ : Dict = '''Training loss: {:.2e} lr: {:.2e}'''.format(UpperCamelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ : Dict = model.module if hasattr(UpperCamelCase , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase__ : Optional[Any] = os.path.join(args.output_dir , UpperCamelCase )
lowerCAmelCase__ : int = os.path.join(args.output_dir , UpperCamelCase )
torch.save(model_to_save.state_dict() , UpperCamelCase )
model_to_save.config.to_json_file(UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCamelCase )
if args.do_eval:
model.eval()
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = 0, 0
lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0
for batch in tqdm(UpperCamelCase , desc='''Evaluating''' ):
lowerCAmelCase__ : str = tuple(t.to(UpperCamelCase ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = batch
with torch.no_grad():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = model(
UpperCamelCase , mc_token_ids=UpperCamelCase , lm_labels=UpperCamelCase , mc_labels=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ : List[Any] = mc_labels.to('''cpu''' ).numpy()
lowerCAmelCase__ : List[Any] = accuracy(UpperCamelCase , UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ : Union[str, Any] = eval_loss / nb_eval_steps
lowerCAmelCase__ : Optional[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase__ : List[str] = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ : Optional[int] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , UpperCamelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
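# Illustrative invocation (the script name and file paths below are hypothetical):
# python run_openai_gpt.py --do_train --do_eval --train_dataset train.csv \
#     --eval_dataset val.csv --output_dir out/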
| 701
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _lowerCAmelCase ( _lowercase ):
A__ = 42
class _lowerCAmelCase ( _lowercase , _lowercase ):
@register_to_config
def __init__( self , __UpperCAmelCase = 6_5536 , __UpperCAmelCase = None , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = 0 , __UpperCAmelCase = "fourier" , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = 0.0 , __UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __UpperCAmelCase = "UNetMidBlock1D" , __UpperCAmelCase = None , __UpperCAmelCase = (32, 32, 64) , __UpperCAmelCase = None , __UpperCAmelCase = 8 , __UpperCAmelCase = 1 , __UpperCAmelCase = False , ):
super().__init__()
lowerCAmelCase__ : Dict = sample_size
# time
if time_embedding_type == "fourier":
lowerCAmelCase__ : str = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=__UpperCAmelCase , log=__UpperCAmelCase , flip_sin_to_cos=__UpperCAmelCase )
lowerCAmelCase__ : int = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCAmelCase__ : int = Timesteps(
block_out_channels[0] , flip_sin_to_cos=__UpperCAmelCase , downscale_freq_shift=__UpperCAmelCase )
lowerCAmelCase__ : str = block_out_channels[0]
if use_timestep_embedding:
lowerCAmelCase__ : str = block_out_channels[0] * 4
lowerCAmelCase__ : Dict = TimestepEmbedding(
in_channels=__UpperCAmelCase , time_embed_dim=__UpperCAmelCase , act_fn=__UpperCAmelCase , out_dim=block_out_channels[0] , )
lowerCAmelCase__ : str = nn.ModuleList([] )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Optional[int] = nn.ModuleList([] )
lowerCAmelCase__ : Optional[Any] = None
# down
lowerCAmelCase__ : List[Any] = in_channels
for i, down_block_type in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = output_channel
lowerCAmelCase__ : Any = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCAmelCase__ : Any = i == len(__UpperCAmelCase ) - 1
lowerCAmelCase__ : Any = get_down_block(
__UpperCAmelCase , num_layers=__UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(__UpperCAmelCase )
# mid
lowerCAmelCase__ : Optional[int] = get_mid_block(
__UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=__UpperCAmelCase , add_downsample=__UpperCAmelCase , )
# up
lowerCAmelCase__ : Union[str, Any] = list(reversed(__UpperCAmelCase ) )
lowerCAmelCase__ : List[str] = reversed_block_out_channels[0]
if out_block_type is None:
lowerCAmelCase__ : Optional[int] = out_channels
else:
lowerCAmelCase__ : int = block_out_channels[0]
for i, up_block_type in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = output_channel
lowerCAmelCase__ : Any = (
reversed_block_out_channels[i + 1] if i < len(__UpperCAmelCase ) - 1 else final_upsample_channels
)
lowerCAmelCase__ : Optional[int] = i == len(__UpperCAmelCase ) - 1
lowerCAmelCase__ : Optional[int] = get_up_block(
__UpperCAmelCase , num_layers=__UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(__UpperCAmelCase )
lowerCAmelCase__ : int = output_channel
# out
lowerCAmelCase__ : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCAmelCase__ : Dict = get_out_block(
out_block_type=__UpperCAmelCase , num_groups_out=__UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=__UpperCAmelCase , act_fn=__UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , ):
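# 1. time: normalize `timestep` to a 1-D tensor on the sample's device before projecting it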
lowerCAmelCase__ : int = timestep
if not torch.is_tensor(__UpperCAmelCase ):
lowerCAmelCase__ : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(__UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowerCAmelCase__ : Any = timesteps[None].to(sample.device )
lowerCAmelCase__ : int = self.time_proj(__UpperCAmelCase )
if self.config.use_timestep_embedding:
lowerCAmelCase__ : str = self.time_mlp(__UpperCAmelCase )
else:
lowerCAmelCase__ : Tuple = timestep_embed[..., None]
lowerCAmelCase__ : Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCAmelCase__ : Any = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCAmelCase__ : int = ()
for downsample_block in self.down_blocks:
lowerCAmelCase__ , lowerCAmelCase__ : int = downsample_block(hidden_states=__UpperCAmelCase , temb=__UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCAmelCase__ : Dict = self.mid_block(__UpperCAmelCase , __UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCAmelCase__ : Tuple = down_block_res_samples[-1:]
lowerCAmelCase__ : int = down_block_res_samples[:-1]
lowerCAmelCase__ : Optional[int] = upsample_block(__UpperCAmelCase , res_hidden_states_tuple=__UpperCAmelCase , temb=__UpperCAmelCase )
# 5. post-process
if self.out_block:
lowerCAmelCase__ : Any = self.out_block(__UpperCAmelCase , __UpperCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__UpperCAmelCase )
| 470
| 0
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> int:
return 1.0 / (1.0 + np.exp(-_outputs ))
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
_lowercase = np.max(_outputs , axis=-1 , keepdims=SCREAMING_SNAKE_CASE_ )
_lowercase = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=SCREAMING_SNAKE_CASE_ )
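# Illustrative: softmax over logits [[1.0, 2.0]] gives roughly [[0.269, 0.731]]; subtracting the
# row-wise max before exponentiating keeps np.exp numerically stable for large logits.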
class a_ ( __lowerCamelCase ):
a : Dict = 'sigmoid'
a : Any = 'softmax'
a : Union[str, Any] = 'none'
@add_end_docstrings(
    __lowerCamelCase , R'''
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    ''' , )
class a_ ( __lowerCamelCase ):
a : List[Any] = False
a : Any = ClassificationFunction.NONE
def __init__( self , **__UpperCamelCase ):
super().__init__(**lowerCamelCase__ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def UpperCamelCase_ ( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="" , **__UpperCamelCase ):
_lowercase = tokenizer_kwargs
_lowercase = {}
if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
_lowercase = self.model.config.return_all_scores
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) or top_k is None:
_lowercase = top_k
_lowercase = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of'''
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , lowerCamelCase__ , )
if return_all_scores:
_lowercase = None
else:
_lowercase = 1
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_lowercase = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowercase = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *__UpperCamelCase , **__UpperCamelCase ):
_lowercase = super().__call__(*lowerCamelCase__ , **lowerCamelCase__ )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowercase = """top_k""" not in kwargs
if isinstance(args[0] , lowerCamelCase__ ) and _legacy:
# This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
def UpperCamelCase_ ( self , __UpperCamelCase , **__UpperCamelCase ):
_lowercase = self.framework
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return self.tokenizer(**lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) == 1 and isinstance(inputs[0] , lowerCamelCase__ ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCamelCase_ ( self , __UpperCamelCase ):
return self.model(**lowerCamelCase__ )
def UpperCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=1 , __UpperCamelCase=True ):
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowercase = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowercase = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
_lowercase = self.model.config.function_to_apply
else:
_lowercase = ClassificationFunction.NONE
_lowercase = model_outputs["""logits"""][0]
_lowercase = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowercase = sigmoid(lowerCamelCase__ )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowercase = softmax(lowerCamelCase__ )
elif function_to_apply == ClassificationFunction.NONE:
_lowercase = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowercase = [
{"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(lowerCamelCase__ )
]
if not _legacy:
dict_scores.sort(key=lambda x : x["score"] , reverse=lowerCamelCase__ )
if top_k is not None:
_lowercase = dict_scores[:top_k]
return dict_scores
| 287
|
def _lowerCamelCase ( SCREAMING_SNAKE_CASE = 100 ):
'''simple docstring'''
A_ = 0
A_ = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
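# A minimal closed-form sketch of the same quantity, using the standard identities
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; `solution_closed_form` is a
# hypothetical helper added here for illustration, not part of the original snippet.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares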
if __name__ == "__main__":
print(f'{solution() = }')
| 203
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
| 707
|
def UpperCamelCase ( __lowercase : list ):
'''simple docstring'''
A_ : str = len(__lowercase )
for _ in range(arr_size ):
for i in range(_ % 2 ,arr_size - 1 ,2 ):
if arr[i + 1] < arr[i]:
arr[i] , arr[i + 1] = arr[i + 1], arr[i]
return arr
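# Quick sanity check (illustrative): n outer passes of alternating odd/even adjacent
# swaps sort any list of length n in O(n^2) comparisons, e.g.
# odd_even_transposition([5, 4, 3, 2, 1]) -> [1, 2, 3, 4, 5]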
if __name__ == "__main__":
_UpperCAmelCase = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 70
| 0
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(_a ) )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(_a ) )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_a ) )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(_a ) )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(_a ) )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ : Optional[int] = """fp16"""
self.assertTrue(is_safetensors_compatible(_a , variant=_a ) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ : int = """fp16"""
self.assertTrue(is_safetensors_compatible(_a , variant=_a ) )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
SCREAMING_SNAKE_CASE__ : List[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(_a , variant=_a ) )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE__ : List[Any] = """fp16"""
self.assertFalse(is_safetensors_compatible(_a , variant=_a ) )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ : str = """fp16"""
self.assertTrue(is_safetensors_compatible(_a , variant=_a ) )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(_a , variant=_a ) )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ : Any = """fp16"""
self.assertFalse(is_safetensors_compatible(_a , variant=_a ) )
| 680
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__a : Optional[int] = 1_0
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
for i in range(lowercase_ , lowercase_ ):
if array[i] == target:
return i
return -1
def __magic_name__ ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = len(lowercase_ )
while left <= right:
if right - left < precision:
return lin_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCamelCase = (left + right) // 3 + 1
UpperCamelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
UpperCamelCase = one_third - 1
elif array[two_third] < target:
UpperCamelCase = two_third + 1
else:
UpperCamelCase = one_third + 1
UpperCamelCase = two_third - 1
else:
return -1
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCamelCase = (left + right) // 3 + 1
UpperCamelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowercase_ , one_third - 1 , lowercase_ , lowercase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowercase_ , lowercase_ , lowercase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowercase_ , lowercase_ )
else:
return -1
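# Illustrative call, using the names from the __main__ block below:
# rec_ternary_search(0, 5, [1, 3, 5, 7, 9, 11], 9) returns index 4, since the range
# is already smaller than `precision` and falls through to the linear scan.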
if __name__ == "__main__":
import doctest
doctest.testmod()
__a : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip()
__a : Tuple = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
__a : Optional[Any] = int(input("""Enter the number to be found in the list:\n""").strip())
__a : Optional[Any] = ite_ternary_search(collection, target)
__a : Tuple = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1 and resultb != -1:
print(F'Iterative search: {target} found at position: {resulta}')
print(F'Recursive search: {target} found at position: {resultb}')
else:
print("""Not found""")
| 606
| 0
|
import logging
from transformers import PretrainedConfig
lowercase : List[Any] = logging.getLogger(__name__)
lowercase : Tuple = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Optional[Any] = 'bertabs'
def __init__( self , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.2 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=0.2 , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(**_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = vocab_size
snake_case_ : Any = max_pos
snake_case_ : str = enc_layers
snake_case_ : Tuple = enc_hidden_size
snake_case_ : List[Any] = enc_heads
snake_case_ : Tuple = enc_ff_size
snake_case_ : Dict = enc_dropout
snake_case_ : List[str] = dec_layers
snake_case_ : Tuple = dec_hidden_size
snake_case_ : Tuple = dec_heads
snake_case_ : Optional[int] = dec_ff_size
snake_case_ : Optional[int] = dec_dropout
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowercase : int = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowercase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 114
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 536
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("T")
class __SCREAMING_SNAKE_CASE (Generic[T] ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ ):
"""simple docstring"""
a_ = data
a_ = None
def __str__( self ):
"""simple docstring"""
return f'{self.data}'
class __SCREAMING_SNAKE_CASE (Generic[T] ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
a_ = None
def __iter__( self ):
"""simple docstring"""
a_ = self.top
while node:
yield node.data
a_ = node.next
def __str__( self ):
"""simple docstring"""
return "->".join([str(UpperCamelCase__ ) for item in self] )
def __len__( self ):
"""simple docstring"""
return len(tuple(iter(self ) ) )
def _a ( self ):
"""simple docstring"""
return self.top is None
def _a ( self , UpperCamelCase__ ):
"""simple docstring"""
a_ = Node(UpperCamelCase__ )
if not self.is_empty():
a_ = self.top
a_ = node
def _a ( self ):
"""simple docstring"""
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , UpperCamelCase__ )
a_ = self.top
a_ = self.top.next
return pop_node.data
def _a ( self ):
"""simple docstring"""
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def _a ( self ):
"""simple docstring"""
a_ = None
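# Illustrative behaviour of the linked-list stack above (class names are obfuscated in
# this dump): push places a new node on top, pop and peek raise IndexError on an empty
# stack, and iteration yields items from the top down, newest first.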
if __name__ == "__main__":
from doctest import testmod
testmod()
| 536
| 1
|
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
snake_case_ = (CMStochasticIterativeScheduler,)
snake_case_ = 1_0
def _UpperCAmelCase ( self : Union[str, Any] , **snake_case : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : Optional[int] = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**snake_case )
return config
def _UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : List[Any] = 10
__magic_name__ : List[Any] = self.get_scheduler_config()
__magic_name__ : Any = self.scheduler_classes[0](**snake_case )
scheduler.set_timesteps(snake_case )
__magic_name__ : Dict = scheduler.timesteps[0]
__magic_name__ : Any = scheduler.timesteps[1]
__magic_name__ : List[str] = self.dummy_sample
__magic_name__ : Dict = 0.1 * sample
__magic_name__ : Any = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
__magic_name__ : Optional[Any] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case )
def _UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case )
def _UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : int = self.get_scheduler_config()
__magic_name__ : Any = scheduler_class(**snake_case )
__magic_name__ : str = 1
scheduler.set_timesteps(snake_case )
__magic_name__ : Union[str, Any] = scheduler.timesteps
__magic_name__ : Union[str, Any] = torch.manual_seed(0 )
__magic_name__ : List[str] = self.dummy_model()
__magic_name__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case ):
# 1. scale model input
__magic_name__ : Tuple = scheduler.scale_model_input(snake_case , snake_case )
# 2. predict noise residual
__magic_name__ : Optional[Any] = model(snake_case , snake_case )
# 3. predict previous sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
__magic_name__ : Union[str, Any] = pred_prev_sample
__magic_name__ : str = torch.sum(torch.abs(snake_case ) )
__magic_name__ : Optional[Any] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def _UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
__magic_name__ : Any = self.scheduler_classes[0]
__magic_name__ : int = self.get_scheduler_config()
__magic_name__ : Any = scheduler_class(**snake_case )
__magic_name__ : Any = [106, 0]
scheduler.set_timesteps(timesteps=snake_case )
__magic_name__ : Optional[int] = scheduler.timesteps
__magic_name__ : Dict = torch.manual_seed(0 )
__magic_name__ : Tuple = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__magic_name__ : Tuple = scheduler.scale_model_input(snake_case , snake_case )
# 2. predict noise residual
__magic_name__ : Tuple = model(snake_case , snake_case )
# 3. predict previous sample x_t-1
__magic_name__ : List[Any] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
__magic_name__ : Union[str, Any] = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(snake_case ) )
__magic_name__ : Dict = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def _UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Dict = self.get_scheduler_config()
__magic_name__ : Tuple = scheduler_class(**snake_case )
__magic_name__ : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=snake_case )
def _UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : Any = self.scheduler_classes[0]
__magic_name__ : Optional[Any] = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**snake_case )
__magic_name__ : List[Any] = [39, 30, 12, 1, 0]
__magic_name__ : List[str] = len(snake_case )
with self.assertRaises(snake_case , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def _UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : List[str] = self.scheduler_classes[0]
__magic_name__ : List[Any] = self.get_scheduler_config()
__magic_name__ : int = scheduler_class(**snake_case )
__magic_name__ : int = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=snake_case )
| 147
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
snake_case_ = 'xlm-prophetnet'
snake_case_ = ['past_key_values']
snake_case_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self : Tuple , snake_case : Optional[float] = 0.1 , snake_case : Optional[Union[str, Callable]] = "gelu" , snake_case : Optional[int] = 3_0522 , snake_case : Optional[int] = 1024 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[float] = 0.1 , snake_case : Optional[float] = 0.1 , snake_case : Optional[int] = 512 , snake_case : Optional[float] = 0.02 , snake_case : Optional[bool] = True , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 2 , snake_case : Optional[int] = 32 , snake_case : Optional[int] = 128 , snake_case : Optional[bool] = False , snake_case : Optional[float] = 0.0 , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 1 , snake_case : Optional[int] = 2 , **snake_case : List[str] , ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = vocab_size
__magic_name__ : Optional[int] = hidden_size
__magic_name__ : Any = encoder_ffn_dim
__magic_name__ : str = num_encoder_layers
__magic_name__ : List[str] = num_encoder_attention_heads
__magic_name__ : Dict = decoder_ffn_dim
__magic_name__ : int = num_decoder_layers
__magic_name__ : str = num_decoder_attention_heads
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : Optional[int] = init_std # Normal(0, this parameter)
__magic_name__ : Optional[int] = activation_function
# parameters for xlmprophetnet
__magic_name__ : int = ngram
__magic_name__ : List[Any] = num_buckets
__magic_name__ : int = relative_max_distance
__magic_name__ : List[str] = disable_ngram_loss
__magic_name__ : Union[str, Any] = eps
# 3 Types of Dropout
__magic_name__ : Tuple = attention_dropout
__magic_name__ : List[Any] = activation_dropout
__magic_name__ : Optional[int] = dropout
__magic_name__ : Dict = use_cache
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , add_cross_attention=snake_case , decoder_start_token_id=snake_case , **snake_case , )
@property
def _UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _UpperCAmelCase ( self : List[Any] , snake_case : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
| 147
| 1
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case, snake_case ):
@register_to_config
def __init__( self : str , snake_case_ : bool , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None ):
"""simple docstring"""
super().__init__()
A : int = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
A : Optional[Any] = torch.zeros(snake_case_ , snake_case_ )
else:
A : Optional[Any] = None
A : Dict = torch.nn.Parameter(snake_case_ )
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
def __init__( self : Union[str, Any] , snake_case_ : VQModel , snake_case_ : CLIPTextModel , snake_case_ : CLIPTokenizer , snake_case_ : TransformeraDModel , snake_case_ : VQDiffusionScheduler , snake_case_ : LearnedClassifierFreeSamplingEmbeddings , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=snake_case_ , transformer=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , scheduler=snake_case_ , learned_classifier_free_sampling_embeddings=snake_case_ , )
def _UpperCAmelCase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ):
"""simple docstring"""
A : Union[str, Any] = len(snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else 1
# get prompt text embeddings
A : Optional[Any] = self.tokenizer(
snake_case_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
A : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
A : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
A : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
A : Optional[int] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case_ )
# duplicate text embeddings for each generation per prompt
A : int = prompt_embeds.repeat_interleave(snake_case_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
A : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
A : List[Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case_ , 1 , 1 )
else:
A : int = [''''''] * batch_size
A : Union[str, Any] = text_input_ids.shape[-1]
A : Any = self.tokenizer(
snake_case_ , padding='''max_length''' , max_length=snake_case_ , truncation=snake_case_ , return_tensors='''pt''' , )
A : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
A : Optional[int] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A : Optional[Any] = negative_prompt_embeds.shape[1]
A : str = negative_prompt_embeds.repeat(1 , snake_case_ , 1 )
A : Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A : int = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : int , snake_case_ : Union[str, List[str]] , snake_case_ : int = 100 , snake_case_ : float = 5.0 , snake_case_ : float = 1.0 , snake_case_ : int = 1 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[torch.FloatTensor] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , snake_case_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case_ : int = 1 , ):
"""simple docstring"""
if isinstance(snake_case_ , snake_case_ ):
A : Tuple = 1
elif isinstance(snake_case_ , snake_case_ ):
A : int = len(snake_case_ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(snake_case_ )}""" )
A : List[str] = batch_size * num_images_per_prompt
A : List[str] = guidance_scale > 1.0
A : Dict = self._encode_prompt(snake_case_ , snake_case_ , snake_case_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case_ , snake_case_ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(snake_case_ )}.""" )
# get the initial completely masked latents unless the user supplied it
A : Dict = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
A : List[Any] = self.transformer.num_vector_embeds - 1
A : str = torch.full(snake_case_ , snake_case_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
A : Union[str, Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case_ , device=self.device )
A : Any = self.scheduler.timesteps.to(self.device )
A : Optional[Any] = latents
for i, t in enumerate(self.progress_bar(snake_case_ ) ):
# expand the sample if we are doing classifier free guidance
A : Dict = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
A : Dict = self.transformer(snake_case_ , encoder_hidden_states=snake_case_ , timestep=snake_case_ ).sample
if do_classifier_free_guidance:
A , A : Tuple = model_output.chunk(2 )
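# classifier-free guidance: extrapolate from the unconditional prediction toward the
# text-conditioned prediction, scaled by guidance_scale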
A : int = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(snake_case_ , dim=1 , keepdim=snake_case_ )
A : int = self.truncate(snake_case_ , snake_case_ )
# remove `log(0)`'s (`-inf`s)
A : int = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
A : Dict = self.scheduler.step(snake_case_ , timestep=snake_case_ , sample=snake_case_ , generator=snake_case_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case_ , snake_case_ , snake_case_ )
A : Tuple = self.vqvae.config.vq_embed_dim
A : str = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
A : str = self.vqvae.quantize.get_codebook_entry(snake_case_ , shape=snake_case_ )
A : List[Any] = self.vqvae.decode(snake_case_ , force_not_quantize=snake_case_ ).sample
A : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : Optional[int] = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
def _UpperCAmelCase ( self : List[Any] , snake_case_ : torch.FloatTensor , snake_case_ : float ):
"""simple docstring"""
A , A : List[str] = torch.sort(snake_case_ , 1 , descending=snake_case_ )
A : Union[str, Any] = torch.exp(snake_case_ )
A : Any = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
A : Optional[Any] = torch.full_like(keep_mask[:, 0:1, :] , snake_case_ )
A : Optional[Any] = torch.cat((all_true, keep_mask) , dim=1 )
A : Union[str, Any] = keep_mask[:, :-1, :]
A : int = keep_mask.gather(1 , indices.argsort(1 ) )
A : Tuple = log_p_x_0.clone()
A : Any = -torch.inf # -inf = log(0)
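# rv keeps, per position, only the smallest set of top tokens whose cumulative sorted
# probability stays below truncation_rate (always at least the argmax); every other
# entry is forced to log(0) = -inf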
return rv
| 256
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = (CMStochasticIterativeScheduler,)
lowerCamelCase_ = 1_0
def _UpperCAmelCase ( self : Any , **snake_case_ : Tuple ):
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**snake_case_ )
return config
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : List[str] = 10
A : Dict = self.get_scheduler_config()
A : Optional[int] = self.scheduler_classes[0](**snake_case_ )
scheduler.set_timesteps(snake_case_ )
A : List[str] = scheduler.timesteps[0]
A : Any = scheduler.timesteps[1]
A : int = self.dummy_sample
A : str = 0.1 * sample
A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case_ )
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**snake_case_ )
A : str = 1
scheduler.set_timesteps(snake_case_ )
A : Optional[int] = scheduler.timesteps
A : int = torch.manual_seed(0 )
A : Optional[Any] = self.dummy_model()
A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case_ ):
# 1. scale model input
A : Dict = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
A : List[Any] = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
A : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A : Union[str, Any] = pred_prev_sample
A : List[str] = torch.sum(torch.abs(snake_case_ ) )
A : int = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : Tuple = self.scheduler_classes[0]
A : Tuple = self.get_scheduler_config()
A : str = scheduler_class(**snake_case_ )
A : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
A : Optional[int] = scheduler.timesteps
A : Any = torch.manual_seed(0 )
A : Tuple = self.dummy_model()
A : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A : Tuple = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
A : str = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
A : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A : str = pred_prev_sample
A : str = torch.sum(torch.abs(snake_case_ ) )
A : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
A : Optional[int] = self.scheduler_classes[0]
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**snake_case_ )
A : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=snake_case_ )
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
A : List[str] = self.scheduler_classes[0]
A : Dict = self.get_scheduler_config()
A : Tuple = scheduler_class(**snake_case_ )
A : Any = [39, 30, 12, 1, 0]
A : List[Any] = len(snake_case_ )
with self.assertRaises(snake_case_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : str = self.get_scheduler_config()
A : List[Any] = scheduler_class(**snake_case_ )
A : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=snake_case_ )
| 256
| 1
|
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
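
# --- Hedged usage sketch (hypothetical wiring, not shipped with the module above):
# register the subcommand on a bare ArgumentParser and run a classification task on a CSV.
if __name__ == "__main__":
    parser = ArgumentParser("Transformers CLI tool")
    RunCommand.register_subcommand(parser.add_subparsers(dest="command"))
    args = parser.parse_args(
        ["run", "--task", "text-classification", "--input", "reviews.csv",
         "--output", "predictions.csv", "--column", "text", "--format", "csv"]
    )
    args.func(args).run()  # run_command_factory builds the pipeline + reader, then RunCommand.run() executes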
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
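
# Hedged sanity checks for the helper above (scale_factor=8, the movq default below):
#   downscale_height_and_width(768, 768, 8) -> (96, 96)   # 768 is a multiple of 8**2
#   downscale_height_and_width(100, 100, 8) -> (16, 16)   # rounded up to the next multiple
# The returned values are the *latent* height/width handed to prepare_latents, which
# the movq decoder later upscales back by scale_factor.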
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
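
# --- Hedged sketch (hypothetical random tensors, not the real UNet output): the
# guidance + learned-variance handling performed inside the denoising loop above.
if __name__ == "__main__":
    latents = torch.randn(2, 4, 96, 96)      # [uncond, text] latent batch
    noise_pred = torch.randn(2, 8, 96, 96)   # 4 noise channels + 4 learned-variance channels
    guidance_scale = 4.0

    noise, variance = noise_pred.split(latents.shape[1], dim=1)
    noise_uncond, noise_text = noise.chunk(2)
    _, variance_text = variance.chunk(2)
    guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
    noise_pred = torch.cat([guided, variance_text], dim=1)  # shape: (1, 8, 96, 96)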