import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput

class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # subiterator None means we haven't started a `preprocess` iterator, so start one
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`.
        # That is because everything is flattened by `PipelineChunkIterator`; we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
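

# Usage sketch (illustrative addition, not part of the original module): these
# helpers are normally driven by `Accelerator.save_state` / `Accelerator.load_state`
# rather than called directly. A minimal direct call, assuming an already-prepared
# `accelerator`, an FSDP-wrapped `model`, its `optimizer`, and a configured
# `fsdp_plugin` (e.g. `accelerator.state.fsdp_plugin`):
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, "checkpoint_dir")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "checkpoint_dir")
#     # ... later ...
#     load_fsdp_model(fsdp_plugin, accelerator, model, "checkpoint_dir")
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "checkpoint_dir")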
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_A = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_A = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_A = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def UpperCAmelCase ( a_ ):
'''simple docstring'''
def remove_articles(a_ ):
lowerCamelCase : Any = re.compile(r'\b(a|an|the)\b', re.UNICODE )
return re.sub(a_, ' ', a_ )
def white_space_fix(a_ ):
return " ".join(text.split() )
def remove_punc(a_ ):
lowerCamelCase : List[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(a_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a_ ) ) ) )
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
return int(normalize_answer(a_ ) == normalize_answer(a_ ) )
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : str = [any(compute_exact(a_, a_ ) for ref in refs ) for pred, refs in zip(a_, a_ )]
return (sum(a_ ) / len(a_ )) * 100
def UpperCAmelCase ( a_, a_, a_, a_ ):
'''simple docstring'''
lowerCamelCase : Tuple = [rgram for rgrams in rgramslist for rgram in rgrams]
lowerCamelCase : str = Counter(a_ )
lowerCamelCase : List[str] = Counter(a_ )
lowerCamelCase : str = Counter()
for sgram, scount in sgramcounter.items():
lowerCamelCase : Tuple = scount * numref
lowerCamelCase : Dict = Counter(a_ )
lowerCamelCase : Any = Counter()
for cgram, ccount in cgramcounter.items():
lowerCamelCase : int = ccount * numref
# KEEP
lowerCamelCase : int = sgramcounter_rep & cgramcounter_rep
lowerCamelCase : Optional[int] = keepgramcounter_rep & rgramcounter
lowerCamelCase : str = sgramcounter_rep & rgramcounter
lowerCamelCase : str = 0
lowerCamelCase : Union[str, Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase : List[Any] = 1
lowerCamelCase : Dict = 1
if len(a_ ) > 0:
lowerCamelCase : Any = keeptmpscorea / len(a_ )
if len(a_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
lowerCamelCase : Optional[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
lowerCamelCase : str = 0
if keepscore_precision > 0 or keepscore_recall > 0:
lowerCamelCase : Optional[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
lowerCamelCase : int = sgramcounter_rep - cgramcounter_rep
lowerCamelCase : Union[str, Any] = delgramcounter_rep - rgramcounter
lowerCamelCase : List[str] = sgramcounter_rep - rgramcounter
lowerCamelCase : Optional[int] = 0
lowerCamelCase : Tuple = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase : Tuple = 1
if len(a_ ) > 0:
lowerCamelCase : Dict = deltmpscorea / len(a_ )
# ADDITION
lowerCamelCase : str = set(a_ ) - set(a_ )
lowerCamelCase : Optional[Any] = set(a_ ) & set(a_ )
lowerCamelCase : List[str] = set(a_ ) - set(a_ )
lowerCamelCase : List[str] = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCamelCase : str = 1
lowerCamelCase : str = 1
if len(a_ ) > 0:
lowerCamelCase : Any = addtmpscore / len(a_ )
if len(a_ ) > 0:
lowerCamelCase : Union[str, Any] = addtmpscore / len(a_ )
lowerCamelCase : List[Any] = 0
if addscore_precision > 0 or addscore_recall > 0:
lowerCamelCase : Any = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def UpperCAmelCase ( a_, a_, a_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = len(a_ )
lowerCamelCase : Optional[int] = ssent.split(' ' )
lowerCamelCase : Dict = csent.split(' ' )
lowerCamelCase : int = []
lowerCamelCase : str = []
lowerCamelCase : List[Any] = []
lowerCamelCase : Optional[Any] = []
lowerCamelCase : Optional[Any] = []
lowerCamelCase : int = []
lowerCamelCase : Any = []
lowerCamelCase : Optional[Any] = []
lowerCamelCase : Tuple = []
lowerCamelCase : List[Any] = []
for rsent in rsents:
lowerCamelCase : List[str] = rsent.split(' ' )
lowerCamelCase : Tuple = []
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : Dict = []
ragramslist.append(a_ )
for i in range(0, len(a_ ) - 1 ):
if i < len(a_ ) - 1:
lowerCamelCase : List[str] = ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(a_ )
if i < len(a_ ) - 2:
lowerCamelCase : int = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(a_ )
if i < len(a_ ) - 3:
lowerCamelCase : Optional[int] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(a_ )
ragramslist.append(a_ )
ragramslist.append(a_ )
ragramslist.append(a_ )
for i in range(0, len(a_ ) - 1 ):
if i < len(a_ ) - 1:
lowerCamelCase : int = sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(a_ )
if i < len(a_ ) - 2:
lowerCamelCase : Union[str, Any] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(a_ )
if i < len(a_ ) - 3:
lowerCamelCase : Any = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(a_ )
for i in range(0, len(a_ ) - 1 ):
if i < len(a_ ) - 1:
lowerCamelCase : Any = cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(a_ )
if i < len(a_ ) - 2:
lowerCamelCase : int = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(a_ )
if i < len(a_ ) - 3:
lowerCamelCase : Optional[int] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(a_ )
(lowerCamelCase) : str = SARIngram(a_, a_, a_, a_ )
(lowerCamelCase) : Union[str, Any] = SARIngram(a_, a_, a_, a_ )
(lowerCamelCase) : int = SARIngram(a_, a_, a_, a_ )
(lowerCamelCase) : Optional[int] = SARIngram(a_, a_, a_, a_ )
lowerCamelCase : Optional[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
lowerCamelCase : Union[str, Any] = sum([delascore, delascore, delascore, delascore] ) / 4
lowerCamelCase : Tuple = sum([addascore, addascore, addascore, addascore] ) / 4
lowerCamelCase : Dict = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def UpperCAmelCase ( a_, a_ = True, a_ = "13a", a_ = True ):
'''simple docstring'''
if lowercase:
lowerCamelCase : str = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
lowerCamelCase : Tuple = sacrebleu.metrics.bleu._get_tokenizer(a_ )()(a_ )
else:
lowerCamelCase : Tuple = sacrebleu.TOKENIZERS[tokenizer]()(a_ )
elif tokenizer == "moses":
lowerCamelCase : Optional[Any] = sacremoses.MosesTokenizer().tokenize(a_, return_str=a_, escape=a_ )
elif tokenizer == "penn":
lowerCamelCase : Dict = sacremoses.MosesTokenizer().penn_tokenize(a_, return_str=a_ )
else:
lowerCamelCase : Tuple = sentence
if not return_str:
lowerCamelCase : Union[str, Any] = normalized_sent.split()
return normalized_sent
def UpperCAmelCase ( a_, a_, a_ ):
'''simple docstring'''
if not (len(a_ ) == len(a_ ) == len(a_ )):
raise ValueError('Sources length must match predictions and references lengths.' )
lowerCamelCase : Any = 0
for src, pred, refs in zip(a_, a_, a_ ):
sari_score += SARIsent(normalize(a_ ), normalize(a_ ), [normalize(a_ ) for sent in refs] )
lowerCamelCase : Union[str, Any] = sari_score / len(a_ )
return 100 * sari_score
def UpperCAmelCase ( a_, a_, a_="exp", a_=None, a_=False, a_=False, a_=False, ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = len(references[0] )
if any(len(a_ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowerCamelCase : Dict = [[refs[i] for refs in references] for i in range(a_ )]
lowerCamelCase : Any = sacrebleu.corpus_bleu(
a_, a_, smooth_method=a_, smooth_value=a_, force=a_, lowercase=a_, use_effective_order=a_, )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
def _UpperCamelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> str:
lowerCamelCase : str = {}
result.update({'sari': compute_sari(sources=UpperCAmelCase_ , predictions=UpperCAmelCase_ , references=UpperCAmelCase_ )} )
result.update({'sacrebleu': compute_sacrebleu(predictions=UpperCAmelCase_ , references=UpperCAmelCase_ )} )
result.update({'exact': compute_em(predictions=UpperCAmelCase_ , references=UpperCAmelCase_ )} )
return result
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_A = 2_5_0_0_0_4
_A = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
lowercase_ = MBartaaTokenizer
lowercase_ = MBartaaTokenizerFast
lowercase_ = True
lowercase_ = True
def _UpperCamelCase ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = MBartaaTokenizer(UpperCAmelCase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ) -> Tuple:
lowerCamelCase : str = '<s>'
lowerCamelCase : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> List[Any]:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(UpperCAmelCase_ ) , 1054 )
def _UpperCamelCase ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _UpperCamelCase ( self ) -> str:
lowerCamelCase : Optional[int] = MBartaaTokenizer(UpperCAmelCase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=UpperCAmelCase_ )
lowerCamelCase : Dict = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
lowerCamelCase : Dict = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
# fmt: off
lowerCamelCase : Optional[Any] = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def _UpperCamelCase ( self ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase : int = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase : Tuple = tempfile.mkdtemp()
lowerCamelCase : Any = tokenizer_r.save_pretrained(UpperCAmelCase_ )
lowerCamelCase : List[str] = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase : int = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
lowerCamelCase : Dict = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase : Optional[int] = tempfile.mkdtemp()
lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
lowerCamelCase : Dict = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCamelCase : Optional[int] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
lowerCamelCase : List[str] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
lowerCamelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase : int = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
lowercase_ = 'facebook/mbart-large-50-one-to-many-mmt'
lowercase_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase_ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def _UpperCamelCase ( cls ) -> int:
lowerCamelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase : Union[str, Any] = 1
return cls
def _UpperCamelCase ( self ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 250038 )
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids )
lowerCamelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase : Union[str, Any] = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
lowerCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
lowerCamelCase : Tuple = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , UpperCAmelCase_ )
lowerCamelCase : str = 10
lowerCamelCase : Optional[int] = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0]
self.assertEqual(ids[0] , UpperCAmelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> List[str]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250053, 250001] )
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : List[str] = tempfile.mkdtemp()
lowerCamelCase : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCamelCase : str = MBartaaTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ )
@require_torch
def _UpperCamelCase ( self ) -> Any:
lowerCamelCase : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors='pt' )
lowerCamelCase : Any = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _UpperCamelCase ( self ) -> Optional[int]:
lowerCamelCase : int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase : Dict = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : Optional[Any] = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt' )
lowerCamelCase : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors='pt' )
lowerCamelCase : List[Any] = targets['input_ids']
lowerCamelCase : List[Any] = shift_tokens_right(UpperCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : List[Any] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , {
# en_XX, A, test, EOS
'input_ids': [[250004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
import inspect
import unittest
from typing import List

import numpy as np

from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor


class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        embed_dim: int = 128,
        resolution: int = 2,
        depths: List[int] = [2, 2, 2, 2],
        hidden_sizes: List[int] = [16, 32, 64, 128],
        dim: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # 3x3 convolution applied after nearest-neighbor upsampling
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # double the spatial resolution, then refine with a convolution
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # strided 3x3 convolution halves the spatial resolution
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        # projects the timestep embedding into the channel dimension
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 convolution matches the residual's channel count to the output
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # add the (broadcast) timestep embedding between the two convolutions
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
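# Illustrative usage (not part of the original module): initialise and apply
# FlaxResnetBlock2D on dummy NHWC data. The channel counts and embedding width
# below are arbitrary choices for this sketch.
def _demo_resnet_block():
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    hidden_states = jnp.zeros((1, 16, 16, 32))  # Flax convolutions expect NHWC
    temb = jnp.zeros((1, 128))  # dummy timestep embedding
    params = block.init(rng, hidden_states, temb)
    out = block.apply(params, hidden_states, temb)
    return out.shape  # (1, 16, 16, 64); the 1x1 shortcut matches the channels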
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the embedded profile JSON from an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the parsed user dictionary."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
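# Minimal offline illustration (synthetic HTML, no network request): how
# extract_user_profile() slices the profile JSON blob out of a <script> tag.
def _demo_extract_user_profile() -> None:
    blob = '{"config": {}, "entry_data": {"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}}'
    soup = BeautifulSoup(f"<script>window._sharedData = {blob};</script>", "html.parser")
    user = extract_user_profile(soup.find("script"))
    assert user["username"] == "github"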
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
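# Arithmetic sketch (the canonical MobileNet-style rounding; shown here only to
# make `make_divisible(512 * width_multiplier, divisor=8)` above concrete, the
# real helper lives in modeling_mobilevitv2):
def _make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # make sure rounding down does not go more than 10% below the value
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value


# e.g. width_multiplier=0.25 gives 512 * 0.25 = 128, already a multiple of 8:
assert _make_divisible_sketch(512 * 0.25) == 128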
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
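# Shape sketch (illustrative, assumed behaviour): post_process_semantic_segmentation
# essentially resizes the logits to the requested size and takes an argmax over the
# class axis, which is why the shapes asserted above are (50, 60) and (32, 32).
def _post_process_sketch(logits, target_size=None):
    if target_size is not None:
        logits = torch.nn.functional.interpolate(logits, size=target_size, mode="bilinear", align_corners=False)
    return logits.argmax(dim=1)  # (batch, height, width) label map


# _post_process_sketch(torch.randn(1, 21, 32, 32), target_size=(50, 60)).shape == (1, 50, 60)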
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
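# Shape sketch (illustrative): for hidden size d, LAVIS stores separate q and v
# biases with k implicitly zero; the fused bias laid out as [q | k=0 | v] then
# matches a single Linear(d, 3 * d).bias.
def _demo_qkv_bias(d=4):
    q_bias = torch.ones(d)
    v_bias = torch.full((d,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias), v_bias))
    assert qkv_bias.shape == (3 * d,)
    return qkv_bias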
def get_blip2_config(model_name):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=32001).to_dict()
    else:
        raise ValueError('Model name not supported')

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left')
    qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained('google/flan-t5-xl', truncation_side='left')
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            'huggyllama/llama-7b', truncation_side='left', bos_token='</s>', unk_token='</s>')
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
        'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
        'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
        'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    hf_model_device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
    lavis_device = 'cuda:2' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device)
    original_model.eval()
    print('Done!')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "llm_proj" in key:
            key = key.replace('llm_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('llm_model'):
            key = key.replace('llm_model', 'language_model')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = 'What is unusual about this image?'

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors='pt').to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']}).logits
            label_input_ids = tokenizer('\n', return_tensors='pt').input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if 'vicuna' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print('Looks ok!')

    print('Generating with original model...')
    original_outputs = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print('Generating with HF model...')
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print('Original generation:', original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
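# Example invocation (illustrative; adjust the script path and output folder to
# your environment):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub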
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
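# Usage sketch (illustrative): because of _LazyModule above, the torch-backed
# classes are only imported on first attribute access, e.g.
#
#   from transformers import TimeSeriesTransformerConfig
#   config = TimeSeriesTransformerConfig(prediction_length=24)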
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
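# Usage sketch (illustrative; downloads Flax weights from the Hub):
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#
# The checkpoint's config class is looked up via CONFIG_MAPPING_NAMES and
# resolved through the _LazyAutoMapping tables above to FlaxBertModel.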
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCAmelCase = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
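# Pure-Python restatement (illustrative) of the special-token layout that
# test_sequence_builders asserts: [CLS] A [SEP] and [CLS] A [SEP] B [SEP].
def _demo_build_inputs(cls_id, sep_id, ids_a, ids_b=None):
    out = [cls_id] + ids_a + [sep_id]
    if ids_b is not None:
        out += ids_b + [sep_id]
    return out


assert _demo_build_inputs(2, 3, [5, 6]) == [2, 5, 6, 3]
assert _demo_build_inputs(2, 3, [5], [7]) == [2, 5, 3, 7, 3]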
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
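# Note (illustrative): at runtime the module object is swapped for _LazyModule,
# while static type checkers follow the TYPE_CHECKING branch above, so
#
#   from transformers import XLMRobertaXLModel
#
# only triggers the heavy torch import on first access.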
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
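# Numerical sketch (toy scalar version, not the real scheduler): the Euler step
# plus second-order (Heun-style) correction pattern used in steps 4.-6. above.
def _heun_sketch(sample, sigma, sigma_prev, derivative, derivative_prev):
    sample_euler = sample + (sigma_prev - sigma) * derivative  # plain Euler step
    # trapezoidal correction averaging the derivatives at sigma and sigma_prev
    sample_corrected = sample + (sigma_prev - sigma) * 0.5 * (derivative + derivative_prev)
    return sample_euler, sample_corrected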
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    # Pop the tensor stored under the old key and re-insert it under the new name.
    val = state_dict.pop(old)
    state_dict[new] = val
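# Quick sanity sketch for rename_key (illustrative key names only):
# sd = {"transformer.encoder.layers.0.linear1.weight": torch.zeros(1)}
# rename_key(sd, "transformer.encoder.layers.0.linear1.weight", "encoder.layers.0.fc1.weight")
# assert "encoder.layers.0.fc1.weight" in sd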
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
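# Example: "backbone.0.body.conv1.weight" is remapped to
# "backbone.conv_encoder.model.conv1.weight"; all other keys pass through unchanged.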
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
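# Sketch of the split above: PyTorch fuses q/k/v into a single (3*d, d) in_proj
# matrix; with d = 256 for Conditional DETR, rows [0:256), [256:512) and [512:768)
# are the query, key and value projections respectively:
# in_proj = torch.randn(3 * 256, 256)
# q_w, k_w, v_w = in_proj[:256], in_proj[256:512], in_proj[-256:]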
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 672
|
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100000)]
def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
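# Example: 44 -> 4**2 + 4**2 = 32, resolved with a single table lookup since 44 < 100000.
assert next_number(44) == 32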
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
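# Worked example: 44 -> 32 -> 13 -> 10 -> 1, so the chain starting at 44 ends at 1;
# the same call also memoises 440, 4400, ... via the trailing while-loop.
assert chain(44) is True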
def solution(number: int = 10000000) -> int:
    """Counts how many starting numbers below `number` produce a digit-square
    chain that arrives at 89 (Project Euler problem 92)."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 672
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
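# Example: "swin-base-simmim-window6-192" selects the base hyperparameters
# (window_size=6, embed_dim=128, heads (4, 8, 16, 32)):
# assert get_swin_config("swin-base-simmim-window6-192").embed_dim == 128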
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
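# Example mapping: rename_key("encoder.layers.0.blocks.0.norm1.weight")
# -> "swin.encoder.layers.0.blocks.0.layernorm_before.weight", while keys
# containing "decoder" are returned unchanged, without the "swin." prefix.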
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        # the model output is a ModelOutput mapping, so .keys() lists its fields
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 74
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
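        # With the defaults above: (16 - 2) // 2 + 1 = 8 frequency patches and
        # (24 - 2) // 2 + 1 = 12 time patches, i.e. 8 * 12 = 96 patches and
        # seq_length = 96 + 2 = 98.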
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    # note: the "spectogram" spelling below matches the actual Hub repo id
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 292
| 0
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
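# Illustrative output (version numbers assumed): http_user_agent({"pipeline": "img2img"})
# might yield "diffusers/0.15.0; python/3.10.6; session_id/abc123; torch/2.0.0; pipeline/img2img",
# or the same string suffixed with "; telemetry/off" when telemetry is disabled.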
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
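# Example: get_full_repo_name("my-model", organization="my-org") -> "my-org/my-model";
# without an organization, the token owner's username is looked up via whoami().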
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
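# Example: a resolved cache path of the form ".../snapshots/<hash>/model.safetensors"
# yields "<hash>" when it matches REGEX_COMMIT_HASH, and None otherwise.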
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
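# Example: _add_variant("diffusion_pytorch_model.bin", "fp16")
# -> "diffusion_pytorch_model.fp16.bin" (the variant slots in before the extension).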
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'this model name. Check the model page at '
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
| 337
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 337
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
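# Hedged usage sketch (assumes a working tools runtime and that the NLLB checkpoint downloads):
# translator = TranslationTool()
# translator("¿Cómo estás?", src_lang="Spanish", tgt_lang="English")  # -> roughly "How are you?"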
| 44
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 68
| 0
|
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
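# Worked example: 25 = 0b11001 and 32 = 0b100000, so the OR is taken column by
# column after zero-filling both operands to six digits:
assert binary_or(25, 32) == "0b111001"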
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
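# Example sanity check: gauss_easter(2023) should evaluate to datetime(2023, 4, 9),
# matching the actual Easter Sunday of that year.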
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = "will be" if year > datetime.now().year else "was"
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 359
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time",
            addition_time_embed_dim=8, transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure both code paths produce the same image
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
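# Hedged sketch of the same img2img call against real weights (repo id and
# prompt are illustrative; this test file does not pin them):
#
#   pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
#   ).to("cuda")
#   image = pipe(prompt="a red panda", image=init_image, strength=0.75).images[0]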
| 42
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Reads a :class:`datasets.Dataset` out of a PySpark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
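# Usage sketch (assumes a live SparkSession; the DataFrame contents are illustrative):
#   spark = pyspark.sql.SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()  # materializes via download_and_prepare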
| 647
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    # Disable gradients for every parameter of the given module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
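# Minimal usage sketch for the helpers above (model is illustrative):
#   device = get_device()
#   model = torch.nn.Linear(4, 4).to(device)
#   freeze_params(model)    # every parameter now has requires_grad == False
#   print(get_timestamp())  # e.g. "14:03:59"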
| 709
|
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
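# A quick self-check (hypothetical helper, not part of the original script);
# both implementations must agree on a few known cases.
def _demo_gcd() -> None:
    assert greatest_common_divisor(24, 40) == 8
    assert gcd_by_iterative(24, 40) == 8
    assert gcd_by_iterative(0, 7) == 7  # gcd(0, n) == |n|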
if __name__ == "__main__":
main()
| 326
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
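# Typical FileLock usage outside of tests (a sketch against the same API):
#   lock = FileLock("resource.lock")
#   with lock.acquire(timeout=5):  # a second holder raises Timeout after ~5s
#       ...  # critical section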
| 515
|
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
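# Example: odd_even_sort([5, 4, 3, 2, 1]) -> [1, 2, 3, 4, 5].
# Brick sort does O(n^2) comparisons like bubble sort, but each even/odd pass
# touches disjoint pairs, so the passes parallelize trivially.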
| 515
| 1
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
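# Example invocation (sketch; script name, prompt and paths are illustrative):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200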
| 717
|
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [14, 447] * 10  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 422
| 0
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, nn_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in nn_ids[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is resolved from module scope at call time
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 199
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
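# Background sketch: ImageGPT's processor quantizes each RGB pixel to its nearest
# color-cluster id, so `input_ids` has length 32 * 32 = 1024 at the model's 32x32
# resolution. The nearest-cluster step, assuming `pixels` of shape (N, 3) and
# `clusters` of shape (n_clusters, 3):
#   d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
#   input_ids = d.argmin(-1)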
| 199
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
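# Example invocation (sketch; script name and flags are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# The wrapped script must expose a `_mp_fn(index)` entry point for xmp.spawn.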
| 205
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
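# Lazy-import sketch (assuming this file is transformers/models/mluke/__init__.py):
# `MLukeTokenizer` is only materialized on first attribute access, e.g.
#   from transformers.models.mluke import MLukeTokenizer  # triggers the real import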
| 205
| 1
|
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # once the code length grows by one bit, rebuild the lexicon keys
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string (only 0's and 1's) as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that a compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    # strip the unary-coded length marker, then the length field itself
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it, and writes the result out."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
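# Usage sketch (file names are illustrative):
#   python lzw_decompress.py compressed.bin restored.bin
# Pipeline: read_file_binary -> remove_prefix -> decompress_data -> write_file_binary.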
| 674
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
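# Minimal usage sketch (the layer counts are illustrative):
#   config = XLMProphetNetConfig(num_encoder_layers=2, num_decoder_layers=2)
#   assert config.num_hidden_layers == 4  # derived, read-only property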
| 674
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
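# Example invocation (model identifiers are illustrative; the flags match the parser above,
# and the script filename is assumed):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint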
| 262
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
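# To run just the slow integration tests above (illustrative; the test-file path is assumed):
#   RUN_SLOW=1 pytest tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py -k "integration"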
| 262
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""yjernite/retribert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
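# Usage sketch (illustrative; the checkpoint name is taken from the pretrained maps above):
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   enc = tokenizer("a query", "a passage", return_tensors="pt")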
| 297
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 297
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
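# Usage sketch (illustrative): the function is consumed by `training_function` below, but
# can also be called standalone once an `Accelerator` exists:
#   train_dl, eval_dl = get_dataloaders(Accelerator(), batch_size=16)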
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
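    # Minimal custom-tracker sketch (illustrative; follows accelerate's documented
    # `GeneralTracker` interface -- verify the names against your accelerate version):
    #
    #     from accelerate.tracking import GeneralTracker
    #
    #     class MyCustomTracker(GeneralTracker):
    #         name = "my_tracker"
    #         requires_logging_directory = False
    #
    #         def store_init_configuration(self, values: dict):
    #             ...  # receives the config passed to accelerator.init_trackers()
    #
    #         def log(self, values: dict, step: int = None):
    #             ...  # receives each dict passed to accelerator.log()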
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 612
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
    parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
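# Example invocation (paths are placeholders; the script filename is assumed):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output gptsan.pt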
| 612
| 1
|
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Compute embeddings for the query and the flattened support set
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
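# Usage sketch (illustrative, not from the original file). `forward` expects tokenized
# batches; `W_supports` must additionally carry the "sizes", "start_token_id" and
# "end_token_id" keys that the method pops before encoding:
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)  # per-token start/end entity scores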
| 549
|
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines for keywords."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file;
    2. counting occurrences of 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the '=' symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
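# Illustrative shape of a `preprocess` result for a toy example (numeric values depend on
# the tokenizer; the keys come from the functions above):
#   preprocess({"content": "def f(x):\n    return x\n"})
#   -> {"hash": ..., "line_mean": ..., "line_max": ..., "alpha_frac": ..., "ratio": ...,
#       "autogenerated": False, "config_or_test": False, "has_no_keywords": False,
#       "has_few_assignments": True}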
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 549
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
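# The lazy module keeps `import transformers.models.vit` cheap: submodules listed in
# `_import_structure` are only imported on first attribute access, e.g. (illustrative):
#   from transformers.models.vit import ViTModel  # triggers the real import here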
| 335
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
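# Usage sketch (illustrative): default construction mirrors the defaults above.
#   config = BeitConfig()
#   config.hidden_size  # 768
#   onnx_config = BeitOnnxConfig(config)  # assumes OnnxConfig's standard constructor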
| 335
| 1
|
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary string to its octal equivalent.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
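# Worked example (matches the doctests above): "1111" is left-padded to "001111" and
# split into ["001", "111"]; each group's bits are weighted 4, 2, 1 via 2 ** (2 - index),
# giving the octal digits 1 and 7, hence "17".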
if __name__ == "__main__":
from doctest import testmod
testmod()
| 219
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
A_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
A_ = get_tests_dir("fixtures/vocab.json")
A_ = get_tests_dir("fixtures")
class snake_case ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def _lowercase ( self : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = WavaVecaConfig()
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
copyfile(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , '''vocab.json''' ) )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
# save in new folder
processor.save_pretrained(lowerCAmelCase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = json.load(lowerCAmelCase_ )
config_dict.pop('''processor_class''' )
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' ) as f:
f.write(json.dumps(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Any ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
# save in new folder
processor.save_pretrained(lowerCAmelCase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = json.load(lowerCAmelCase_ )
config_dict.pop('''processor_class''' )
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' ) as f:
f.write(json.dumps(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(lowerCAmelCase_ )
# copy relevant files
copyfile(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' ) as f:
f.write('''{}''' )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
SCREAMING_SNAKE_CASE_ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
SCREAMING_SNAKE_CASE_ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , lowerCAmelCase_ )
AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
AutoProcessor.register(lowerCAmelCase_ , lowerCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoProcessor.register(lowerCAmelCase_ , lowerCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase_ , '''vocab.txt''' )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ = CustomTokenizer(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = CustomProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase : Dict = False
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = False
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = """AutoFeatureExtractor"""
UpperCAmelCase : Tuple = """AutoTokenizer"""
UpperCAmelCase : List[Any] = False
try:
AutoConfig.register('''custom''' , lowerCAmelCase_ )
AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
AutoProcessor.register(lowerCAmelCase_ , lowerCAmelCase_ )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def _lowercase ( self : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class snake_case ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def _lowercase ( cls : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TOKEN
HfFolder.save_token(lowerCAmelCase_ )
@classmethod
def _lowercase ( cls : Dict ) -> List[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCAmelCase_ , '''test-processor''' ) , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(new_processor.feature_extractor , lowerCAmelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCAmelCase_ , '''test-processor-org''' ) , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token , organization='''valid_org''' , )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(new_processor.feature_extractor , lowerCAmelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase_ , '''vocab.txt''' )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ = CustomTokenizer(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = CustomProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
SCREAMING_SNAKE_CASE_ = Repository(lowerCAmelCase_ , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(lowerCAmelCase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowerCAmelCase_ , '''tokenizer_config.json''' ) ) as f:
SCREAMING_SNAKE_CASE_ = json.load(lowerCAmelCase_ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase_ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase_ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase_ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=lowerCAmelCase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
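# Added sketch (not part of the test suite above): the registration pattern the
# tests exercise, with hypothetical stand-in classes. The Auto*.register calls
# are real transformers APIs; everything named "My*" is made up.
def _example_register_custom_processor():
    from transformers import AutoConfig, AutoProcessor, PretrainedConfig, ProcessorMixin

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyProcessor(ProcessorMixin):
        feature_extractor_class = "AutoFeatureExtractor"
        tokenizer_class = "AutoTokenizer"

    AutoConfig.register("my-model", MyConfig)
    AutoProcessor.register(MyConfig, MyProcessor)
    # From here on, AutoProcessor.from_pretrained resolves MyProcessor for any
    # checkpoint whose config has model_type == "my-model".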
| 393
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
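# Added usage note (illustrative; the script filename is assumed): a script like
# the one above is launched with the `accelerate` CLI, e.g.
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2
#   accelerate launch local_sgd.py --mixed_precision fp16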
| 188
|
import enum
import shutil
import sys


TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
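# Added demo (illustrative): overwrite a status line in place, then print a
# colored marker (32 is the ANSI code for green).
if __name__ == "__main__":
    forceWrite("progress: 50%")
    clear_line()
    forceWrite("progress: 100%", end="\n")
    writeColor("done", 32, end="\n")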
| 188
| 1
|
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
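# Added usage note (illustrative): with a hubconf module like the one above on a
# repo's default branch, torch.hub can load components directly. The repo name
# below is the one historically used in the transformers docs; treat it as an
# assumption.
#
# import torch
# tokenizer = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
# model = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")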
| 108
|
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
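# Added sketch (illustrative): initializing and applying the resnet block above.
# These Flax modules use NHWC layout; the shapes below are arbitrary.
if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    rng = jax.random.PRNGKey(0)
    hidden_states = jnp.zeros((1, 8, 8, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # time-embedding vector
    params = block.init(rng, hidden_states, temb)
    out = block.apply(params, hidden_states, temb)
    assert out.shape == (1, 8, 8, 64)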
| 620
| 0
|
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
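# Added sketch (illustrative): reading back a pickle produced by the script
# above; the exact filename depends on --dump_file and --tokenizer_name.
#
# import pickle
# with open("data/dump.bert-base-uncased.pickle", "rb") as f:
#     sequences = pickle.load(f)  # list of numpy arrays of token ids
# print(len(sequences), sequences[0][:10])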
| 115
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
a_, a_, a_ = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                token = None

            with xopen(path, "rb", use_auth_token=token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
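# Added sketch (illustrative): how this feature is normally used through the
# datasets API, assuming the class above is the stock `datasets.Audio`.
#
# from datasets import Dataset, Audio
#
# ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
# ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
# sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}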
| 115
| 1
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    """Read a big-endian uint32 from a byte stream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
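# Added sketch (illustrative): classic usage of the (deprecated) loader above.
# Running it downloads the four MNIST archives into the given directory.
#
# mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
# images, labels = mnist.train.next_batch(64)
# print(images.shape, labels.shape)  # (64, 784) (64, 10)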
| 344
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"}
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}
    )
def _lowercase ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' ,__lowerCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(__lowerCamelCase )
datasets.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
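# A minimal invocation sketch (not part of the original script; the model name
# and output path below are illustrative):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de \
#       --train_language en \
#       --do_train --do_eval \
#       --output_dir /tmp/debug_xnli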
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
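# Usage sketch (not part of the original file): from the root of an accelerate
# checkout, a single test class can be selected with pytest, e.g.
#
#   python -m pytest -k FeatureExamplesTests tests/test_examples.py
#
# (the test-file path is an assumption about the repository layout).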
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
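# Usage sketch (not part of the original file; downloads the checkpoint from
# the Hub on first use):
#
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tok("hello world")["input_ids"]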
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
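# Usage sketch (not part of the original file):
#
#   base16_encode(b"Hello World!")              # -> '48656C6C6F20576F726C6421'
#   base16_decode("48656C6C6F20576F726C6421")   # -> b'Hello World!'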
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
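# Usage sketch (not part of the original file): HfArgumentParser maps dataclass
# fields to CLI flags, e.g.
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#   )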
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
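# Usage sketch (not part of the original file; "facebook/DiT-XL-2-256" is the
# public checkpoint this pipeline is usually loaded from):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]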
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card_creation(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
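# Usage sketch (not part of the original file): slow tests like these are
# opt-in in the transformers test suite, e.g.
#
#   RUN_SLOW=1 python -m pytest tests/models/marian/test_tatoeba_conversion.py  # hypothetical path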
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
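# Example invocation (not part of the original script; the script and model
# paths below are illustrative):
#
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path /path/to/fine-pruned-model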
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
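# Usage sketch (not part of the original file): _LazyModule defers the real
# import until first attribute access, so the tokenizer module is only loaded
# when something does, e.g.:
#
#   from transformers import TapexTokenizer  # resolved lazily through this module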
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means Clustering using TensorFlow."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
##Now lets construct the node that will compute the mean
# The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
##Node for computing Euclidean distances
# Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
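# Usage sketch (not part of the original file). Note this targets the legacy
# TF 1.x-era API (tf.Session, tf.sub, tf.initialize_all_variables), which is
# gone in TensorFlow 2:
#
#   vectors = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.8, 8.2]]
#   centroids, assignments = TFKMeansCluster(vectors, 2)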
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Find the Jaccard similarity between two sets: the size of the intersection
    divided by the size of the union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
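# With alternative_union=True the denominator is len(set_a) + len(set_b)
# instead of the true union, so for the sets above the score would be
# 3 / (5 + 6) = 3/11 ≈ 0.273 rather than 3/8 = 0.375:
#
#   print(jaccard_similarity(set_a, set_b, alternative_union=True))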
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew  and
      Dorr, Bonnie  and
      Schwartz, Rich  and
      Micciulla, Linnea  and
      Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions, references=references, case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??", "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions, references=references, case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??", "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions, references=references, normalized=True, case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??", "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions, references=references, ignore_punct=True, case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions, references=references, ignore_punct=True, case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(self, predictions, references, normalized=False, ignore_punct=False, support_zh_ja_chars=False, case_sensitive=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
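# Usage sketch (not part of the original file; requires sacrebleu>=1.4.12):
#
#   ter = datasets.load_metric("ter")
#   ter.compute(predictions=["hello there"], references=[["hello there"]])
#   # identical strings need zero edits, so the score is 0.0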
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # rescale data to the [0, 1] range (min-max normalization)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit variance (z-scores)
    return [round((x - mu) / sigma, ndigits) for x in data]
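# Hedged usage sketch (added): a quick check of the two helpers above on sample
# data; the input values are assumptions.
if __name__ == "__main__":
    sample = [10, 20, 30, 40, 50]
    print(normalization(sample))    # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(sample))  # [-1.265, -0.632, 0.0, 0.632, 1.265]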
| 300
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1_024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
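# Hedged usage sketch (added): instantiating the config above with its defaults;
# the printed fields are just a sanity check, not part of the original file.
if __name__ == "__main__":
    config = LiltConfig()
    print(config.hidden_size, config.channel_shrink_ratio, config.max_2d_position_embeddings)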
| 91
|
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """simple docstring"""
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
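# Worked check (added): for successes=2, trials=4, prob=0.75 the factors are
# C(4, 2) = 6 and 0.75**2 * 0.25**2 = 0.03515625, so the result is 0.2109375.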
| 91
| 1
|
"""simple docstring"""
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value):
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
if __name__ == "__main__":
    import doctest

    doctest.testmod()
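# Hedged usage sketch (added): exercising the doubly linked list defined above;
# the sample values are assumptions.
if __name__ == "__main__":
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    print(linked_list)            # 1 2 3
    linked_list.delete_value(2)
    print(2 in linked_list)       # False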
| 341
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=True ,A=99 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,head_mask=A )
UpperCAmelCase = model(A ,token_type_ids=A )
UpperCAmelCase = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( snake_case , snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _UpperCamelCase ( self ,A ,A ,A=False ):
UpperCAmelCase = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=A ,)
UpperCAmelCase = inputs_dict["""labels"""]
UpperCAmelCase = inputs_dict["""labels"""]
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=A ,)
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
return inputs_dict
def _UpperCamelCase ( self ):
UpperCAmelCase = OpenAIGPTModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,n_embd=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def _UpperCamelCase ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(A )
UpperCAmelCase = torch.tensor([[481, 4_735, 544]] ,dtype=torch.long ,device=A ) # the president is
UpperCAmelCase = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCAmelCase = model.generate(A ,do_sample=A )
self.assertListEqual(output_ids[0].tolist() ,A )
| 341
| 1
|
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant learning-rate multipliers parsed from `step_rules`."""
    rules_dict = {}
    rule_list = step_rules.split(',')
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(':')
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
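# Worked example (added): with step_rules="10:1.0,20:0.1,0.01" the multiplier is
# 1.0 before step 10, 0.1 for steps 10-19, and 0.01 from step 20 onwards.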
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup followed by a cosine decay."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    """Cosine schedule with hard restarts, after a linear warmup."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Polynomial decay from the optimizer's initial lr down to `lr_end`."""
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper to retrieve any of the schedules above by name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
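# Hedged usage sketch (added): wiring get_scheduler above to a toy optimizer; the
# parameter tensor and step counts are assumptions.
#
#   import torch
#   params = [torch.nn.Parameter(torch.zeros(1))]
#   optimizer = torch.optim.AdamW(params, lr=1e-3)
#   lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       optimizer.step()
#       lr_scheduler.step()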
| 433
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 433
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
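# Hedged usage sketch (added): loading the tokenizer from the checkpoint named in
# PRETRAINED_VOCAB_FILES_MAP above (network access assumed).
#
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   ids = tokenizer("Hello world")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))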
| 124
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class _snake_case :
def __init__( self , _a , _a=13 , _a=7 , _a=False , _a=True , _a=False , _a=True , _a=33 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__magic_name__ : Dict = parent
__magic_name__ : List[str] = batch_size
__magic_name__ : Tuple = seq_length
__magic_name__ : int = is_training
__magic_name__ : Union[str, Any] = use_input_mask
__magic_name__ : str = use_token_type_ids
__magic_name__ : Dict = use_labels
__magic_name__ : List[Any] = vocab_size
__magic_name__ : Tuple = hidden_size
__magic_name__ : Union[str, Any] = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : str = intermediate_size
__magic_name__ : Tuple = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : int = attention_probs_dropout_prob
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : Optional[int] = type_vocab_size
__magic_name__ : Optional[int] = type_sequence_label_size
__magic_name__ : Dict = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Optional[Any] = scope
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Union[str, Any] = None
if self.use_input_mask:
__magic_name__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
__magic_name__ : List[Any] = None
__magic_name__ : List[str] = None
if self.use_labels:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : Dict = EsmModel(config=_a )
model.to(_a )
model.eval()
__magic_name__ : str = model(_a , attention_mask=_a )
__magic_name__ : List[str] = model(_a )
__magic_name__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : int = EsmForMaskedLM(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Optional[Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : int = self.num_labels
__magic_name__ : int = EsmForTokenClassification(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Tuple = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase__ = False
UpperCamelCase__ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = ()
UpperCamelCase__ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = EsmModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__magic_name__ : str = type
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[Any] = EsmModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()[0]
__magic_name__ : List[str] = EsmEmbeddings(config=_a )
__magic_name__ : Dict = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
__magic_name__ : Tuple = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__magic_name__ : Dict = create_position_ids_from_input_ids(_a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_a , _a ) ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()[0]
__magic_name__ : str = EsmEmbeddings(config=_a )
__magic_name__ : Optional[Any] = torch.empty(2 , 4 , 30 )
__magic_name__ : str = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__magic_name__ : Tuple = torch.as_tensor([expected_single_positions, expected_single_positions] )
__magic_name__ : List[str] = embeddings.create_position_ids_from_inputs_embeds(_a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_a , _a ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self ):
pass
@require_torch
class _snake_case ( snake_case ):
@slow
def SCREAMING_SNAKE_CASE ( self ):
with torch.no_grad():
__magic_name__ : Dict = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
__magic_name__ : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Dict = model(_a )[0]
__magic_name__ : Optional[Any] = 33
__magic_name__ : Optional[int] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , _a )
__magic_name__ : List[Any] = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
with torch.no_grad():
__magic_name__ : Optional[int] = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
__magic_name__ : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__magic_name__ : Tuple = model(_a )[0]
# compare the actual values for a slice.
__magic_name__ : Optional[Any] = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 124
| 1
|
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'


def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes():
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 500
|
"""simple docstring"""
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
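# Worked check (added): C(10, 5) = 10! / (5! * 5!) = 252, so the call above prints 252.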
| 500
| 1
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Any=32 , UpperCAmelCase : Dict=3 , UpperCAmelCase : List[str]=10 , UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , UpperCAmelCase : int=[1, 1, 2, 1] , UpperCAmelCase : Tuple=True , UpperCAmelCase : int=True , UpperCAmelCase : Optional[Any]="relu" , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : int=None , ):
A_ = parent
A_ = batch_size
A_ = image_size
A_ = num_channels
A_ = embeddings_size
A_ = hidden_sizes
A_ = depths
A_ = is_training
A_ = use_labels
A_ = hidden_act
A_ = num_labels
A_ = scope
A_ = len(UpperCAmelCase )
def __A ( self : str ):
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = self.get_config()
return config, pixel_values
def __A ( self : Union[str, Any] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = FlaxRegNetModel(config=UpperCAmelCase )
A_ = model(UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __A ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Tuple ):
A_ = self.num_labels
A_ = FlaxRegNetForImageClassification(config=UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Union[str, Any] ):
A_ = self.prepare_config_and_inputs()
A_ , A_ = config_and_inputs
A_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_lowerCamelCase : Dict = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Optional[int] = False
def __A ( self : Optional[int] ):
A_ = FlaxRegNetModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def __A ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self : Dict ):
return
def __A ( self : Optional[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __A ( self : str ):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __A ( self : int ):
pass
def __A ( self : Optional[Any] ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
A_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def __A ( self : List[str] ):
def check_hidden_states_output(UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple ):
A_ = model_class(UpperCAmelCase )
A_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ):
return model(pixel_values=UpperCAmelCase , **UpperCAmelCase )
with self.subTest("JIT Enabled" ):
A_ = model_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
A_ = model_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : Dict ):
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def __A ( self : int ):
A_ = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCAmelCase , return_tensors="np" )
A_ = model(**UpperCAmelCase )
# verify the logits
A_ = (1, 1000)
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
A_ = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) )
| 86
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
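# Hedged note (added): with the lazy module installed above, an import such as
# `from transformers.models.xglm import XGLMConfig` resolves through _LazyModule,
# so the heavy framework-specific imports only run on first attribute access.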
| 86
| 1
|
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse, per backend, the _import_structure objects and the TYPE_CHECKING objects of an init."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and report any mismatch."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Run parse_init/analyze_results on every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """List all submodules of the transformers package."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Verify every submodule is registered in the main init's _import_structure."""
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 707
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
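# Worked check (added): softmax([[0.0, 0.0]]) -> [[0.5, 0.5]]; subtracting the
# row-wise max first keeps np.exp from overflowing on large logits.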
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' ,)
class lowerCamelCase ( _lowerCamelCase ):
'''simple docstring'''
UpperCamelCase__ =False
UpperCamelCase__ =ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
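if __name__ == "__main__":
    # A quick usage sketch (ours, not part of the original module; downloads a
    # default model, so it is illustrative rather than a test):
    from transformers import pipeline

    classifier = pipeline("text-classification")
    print(classifier("This movie was great!"))  # [{"label": ..., "score": ...}]
    print(classifier("This movie was great!", top_k=None))  # one score per label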
| 501
| 0
|
"""Dutch national flag sort: a single-pass, in-place sort of a sequence containing
only the three values red (0), white (1) and blue (2)."""

red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2])
    [0, 0, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
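# A minimal self-check with example values chosen here (not from the original
# module): after one pass, all 0s precede all 1s, which precede all 2s.
assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []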
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 536
|
"""MSE - Mean Squared Error metric."""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric(\"mse\")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'mse': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {'mse': 0.6123724356957945}\n\n    If you're using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'mse': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 390
| 0
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
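def _offload_roundtrip_sketch(tmp_dir: str):
    # A minimal usage sketch (not one of the original tests) of the utilities
    # exercised above: write a state dict to `tmp_dir`, then read a weight back
    # lazily through OffloadedWeightsLoader.
    model = ModelForTest()
    offload_state_dict(tmp_dir, model.state_dict())
    weights = OffloadedWeightsLoader(save_folder=tmp_dir)
    return weights["linear1.weight"]  # loaded from disk on access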
| 708
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _lowercase(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 504
| 0
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
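# A minimal sketch of a custom criterion in the same vein (the class below is ours,
# not part of transformers): stop as soon as every sequence in the batch ends with a
# given token id.
if is_torch_available():
    from transformers.generation import StoppingCriteria

    class StopOnTokenCriteria(StoppingCriteria):
        def __init__(self, stop_token_id: int):
            self.stop_token_id = stop_token_id

        def __call__(self, input_ids, scores, **kwargs) -> bool:
            # input_ids has shape (batch_size, sequence_length)
            return bool((input_ids[:, -1] == self.stop_token_id).all())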
| 10
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter for the flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
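    # A usage sketch with example coordinates (values chosen here): Lambert's formula
    # corrects the spherical haversine estimate for the Earth's flattening, so the two
    # distances printed below should agree closely for nearby points.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE))
    print(haversine_distance(*SAN_FRANCISCO, *YOSEMITE))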
| 82
| 0
|
"""Convert an ALBERT checkpoint from TensorFlow to PyTorch."""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
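# Example invocation of this conversion script (all paths below are placeholders):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_model.ckpt \
#       --albert_config_file ./albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin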
| 145
|
"""Least-significant-digit (LSD) radix sort."""
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
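    # Worked example (input chosen here): each pass buckets by one digit.
    #   [170, 45, 75, 90, 802, 24, 2, 66]
    #   after the 1s   pass: [170, 90, 802, 2, 24, 45, 75, 66]
    #   after the 10s  pass: [802, 2, 24, 45, 66, 170, 75, 90]
    #   after the 100s pass: [2, 24, 45, 66, 75, 90, 170, 802]
    assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]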
| 145
| 1
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
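# For contrast, a sequential odd-even transposition sort (a sketch added here; the
# parallel version above performs the same alternating compare-exchange pattern,
# with one process per element):
def odd_even_transposition_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr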
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 40
|
"""Tests for the VQModel."""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 210
| 0
|
"""First- and second-order Shannon entropy of the characters of a text."""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts.
    The first dictionary stores the frequency of single character strings.
    The second dictionary stores the frequency of two character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
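# A worked example of the quantities computed above, for the 4-character text "abab"
# (input chosen here): analyze_text returns single counts {a: 2, b: 2} and pair
# counts {" a": 1, "ab": 2, "ba": 1}, so the first-order entropy is
#     -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit per character,
# and the second-order entropy is
#     -(0.5*log2(0.5) + 0.25*log2(0.25) + 0.25*log2(0.25)) = 1.5 bits per pair.
# calculate_prob prints these two values (rounded) and their difference.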
def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 717
|
"""Tokenization classes for XGLM."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
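# A small sanity sketch of the fairseq alignment handled above (ids illustrative):
# the sentencepiece vocabulary is shifted right by fairseq_offset = 1 so that ids
# 0-3 can hold <s>, <pad>, </s> and <unk> in fairseq order. For example, the token
# at spm id 3 maps to fairseq id 3 + 1 = 4, while an spm id of 0 (spm's own <unk>)
# is translated to unk_token_id instead of being offset.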
| 489
| 0
|
"""Lower-upper (LU) decomposition of a square matrix (Doolittle's method)."""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Ensure that table is a square array
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
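    # A worked 2x2 example (chosen here): for A = [[4, 3], [6, 3]], Doolittle's method
    # gives L = [[1, 0], [1.5, 1]] and U = [[4, 3], [0, -1.5]], so that L @ U == A.
    _table = np.array([[4.0, 3.0], [6.0, 3.0]])
    _lower, _upper = lower_upper_decomposition(_table)
    assert np.allclose(_lower @ _upper, _table)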
| 211
|
"""Convert RegNet checkpoints from timm and vissl."""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return the correct hugging face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just
    # check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert; it must be one of the supported regnet* architectures,'''
            ''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
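
# Example invocation (a sketch; the script file name and output path are assumptions,
# while the flags come from the argparse definition above):
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-320-seer \
#       --pytorch_dump_folder_path ./regnet_dump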
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = KandinskyVaaControlnetImgaImgPipeline
UpperCAmelCase__ = ["image_embeds", "negative_image_embeds", "image", "hint"]
UpperCAmelCase__ = ["image_embeds", "negative_image_embeds", "image", "hint"]
UpperCAmelCase__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ = False
@property
def __lowercase( self ) -> Any:
return 32
@property
def __lowercase( self ) -> Any:
return 32
@property
def __lowercase( self ) -> Tuple:
return self.time_input_dim
@property
def __lowercase( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __lowercase( self ) -> List[str]:
return 100
@property
def __lowercase( self ) -> Tuple:
torch.manual_seed(0 )
__UpperCamelCase = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowercase( self ) -> List[Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowercase( self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase( self ) -> Optional[Any]:
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__UpperCamelCase = DDIMScheduler(**_SCREAMING_SNAKE_CASE )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Tuple:
__UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_SCREAMING_SNAKE_CASE )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase = Image.fromarray(np.uint8(_SCREAMING_SNAKE_CASE ) ).convert('RGB' ).resize((256, 256) )
# create hint
__UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __lowercase( self ) -> str:
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        __UpperCamelCase = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self ) -> List[str]:
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__UpperCamelCase = init_image.resize((512, 512) )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
__UpperCamelCase = torch.from_numpy(np.array(_SCREAMING_SNAKE_CASE ) ).float() / 255.0
__UpperCamelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__UpperCamelCase = 'A robot, 4k photo'
__UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.float16 )
__UpperCamelCase = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase , __UpperCamelCase = pipe_prior(
_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , strength=0.8_5 , generator=_SCREAMING_SNAKE_CASE , negative_prompt='' , ).to_tuple()
__UpperCamelCase = pipeline(
image=_SCREAMING_SNAKE_CASE , image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , hint=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
__UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def sum_of_proper_divisors(input_num: int) -> int:
    """
    Return the sum of all proper divisors of ``input_num``.

    >>> sum_of_proper_divisors(12)
    16
    >>> sum_of_proper_divisors(1)
    0
    """
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : str = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : int = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : List[Any] = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : Any = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
_lowerCAmelCase : Union[str, Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
_lowerCAmelCase : Union[str, Any] = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
_lowerCAmelCase : Tuple = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
_lowerCAmelCase : Optional[Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
_lowerCAmelCase : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class A_ ( _a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class A_ ( _a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase : List[Any] = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
_lowerCAmelCase : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
_lowerCAmelCase : Tuple = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
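
# A minimal usage sketch of the reader tokenizer defined below (hedged: the exported
# class and checkpoint names follow the public DPR release, not this file's aliases):
#
#   from transformers import DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   # encoded_inputs["input_ids"] has shape (n_passages, sequence_length)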
@add_start_docstrings(_a )
class A_ :
def __call__( self: List[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: Union[bool, str] = False ,__lowerCAmelCase: Union[bool, str] = False ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[Union[str, TensorType]] = None ,__lowerCAmelCase: Optional[bool] = None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__lowerCAmelCase ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,**__lowerCAmelCase ,)
elif titles is None or texts is None:
_lowerCamelCase : int = titles if texts is None else texts
return super().__call__(
__lowerCAmelCase ,__lowerCAmelCase ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : Dict = titles if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else [titles]
_lowerCamelCase : List[Any] = texts if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else [texts]
_lowerCamelCase : Optional[int] = len(__lowerCAmelCase )
_lowerCamelCase : Tuple = questions if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else [questions] * n_passages
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
F"""There should be as many titles than texts but got {len(__lowerCAmelCase )} titles and {len(__lowerCAmelCase )} texts.""" )
_lowerCamelCase : Dict = super().__call__(__lowerCAmelCase ,__lowerCAmelCase ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase )["input_ids"]
_lowerCamelCase : List[str] = super().__call__(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase )["input_ids"]
_lowerCamelCase : List[Any] = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__lowerCAmelCase ,__lowerCAmelCase )
]
}
if return_attention_mask is not False:
_lowerCamelCase : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCamelCase : Union[str, Any] = attention_mask
return self.pad(__lowerCAmelCase ,padding=__lowerCAmelCase ,max_length=__lowerCAmelCase ,return_tensors=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: BatchEncoding ,__lowerCAmelCase: DPRReaderOutput ,__lowerCAmelCase: int = 16 ,__lowerCAmelCase: int = 64 ,__lowerCAmelCase: int = 4 ,):
'''simple docstring'''
_lowerCamelCase : List[Any] = reader_input["input_ids"]
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = reader_output[:3]
_lowerCamelCase : Optional[int] = len(__lowerCAmelCase )
_lowerCamelCase : Any = sorted(range(__lowerCAmelCase ) ,reverse=__lowerCAmelCase ,key=relevance_logits.__getitem__ )
_lowerCamelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCamelCase : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCamelCase : Optional[Any] = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCamelCase : str = sequence_ids.index(self.pad_token_id )
else:
_lowerCamelCase : Any = len(__lowerCAmelCase )
_lowerCamelCase : Dict = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=__lowerCAmelCase ,top_spans=__lowerCAmelCase ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=__lowerCAmelCase ,start_index=__lowerCAmelCase ,end_index=__lowerCAmelCase ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(__lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,):
'''simple docstring'''
_lowerCamelCase : int = []
for start_index, start_score in enumerate(__lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCamelCase : Optional[int] = sorted(__lowerCAmelCase ,key=lambda __lowerCAmelCase : x[1] ,reverse=__lowerCAmelCase )
_lowerCamelCase : Dict = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCamelCase : Any = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class A_ ( _a , _a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = ['input_ids', 'attention_mask']
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=12 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=0.02 , lowerCAmelCase_=0 , lowerCAmelCase_=None , ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = projection_dim
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = scope
__lowercase = bos_token_id
def snake_case__ ( self ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__lowercase = input_mask.numpy()
__lowercase , __lowercase = input_mask.shape
__lowercase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCAmelCase_ ):
__lowercase = 1
__lowercase = 0
__lowercase = self.get_config()
return config, input_ids, tf.convert_to_tensor(lowerCAmelCase_ )
def snake_case__ ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = TFBlipTextModel(config=lowerCAmelCase_ )
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , training=lowerCAmelCase_ )
__lowercase = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case__ ( self ):
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class snake_case ( __snake_case ,unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def snake_case__ ( self ):
__lowercase = BlipTextModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def snake_case__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFBlipTextModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case__ ( self , lowerCAmelCase_=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCAmelCase_ )
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
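
# Example invocation (a sketch; the paths are assumptions, while the flags come from
# the argparse definition above):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel_tf/model.ckpt \
#       --config_file ./funnel_tf/config.json \
#       --pytorch_dump_path ./funnel_pt/pytorch_model.bin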
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row of the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
        # If any of these are True it means there is a collision, so we continue to the
# next value in the for loop.
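        #
        # Worked example: queens at (row=1, col=2) and (row=2, col=3) both give
        # row - col == -1, so they attack each other along the same 45º diagonal.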
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''mems''']
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = []
self.cutoffs.extend(lowerCAmelCase__ )
if proj_share_all_but_first:
__SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs )
else:
__SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs )
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed
__SCREAMING_SNAKE_CASE : Tuple = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Optional[Any] = div_val
__SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm
__SCREAMING_SNAKE_CASE : List[str] = n_layer
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : str = mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = same_length
__SCREAMING_SNAKE_CASE : str = attn_type
__SCREAMING_SNAKE_CASE : Dict = clamp_len
__SCREAMING_SNAKE_CASE : Tuple = sample_softmax
__SCREAMING_SNAKE_CASE : Optional[int] = adaptive
__SCREAMING_SNAKE_CASE : int = dropout
__SCREAMING_SNAKE_CASE : Optional[Any] = dropatt
__SCREAMING_SNAKE_CASE : int = untie_r
__SCREAMING_SNAKE_CASE : Optional[int] = init
__SCREAMING_SNAKE_CASE : List[str] = init_range
__SCREAMING_SNAKE_CASE : Any = proj_init_std
__SCREAMING_SNAKE_CASE : List[str] = init_std
__SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__( self :str ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
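

# A minimal configuration sketch (hedged: in the public transformers library this
# class is exported as TransfoXLConfig; the defaults mirror the __init__ above):
#
#   from transformers import TransfoXLConfig
#
#   config = TransfoXLConfig(d_model=512, n_head=8, n_layer=6)
#   print(config.cutoffs)  # [20000, 40000, 200000] by default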
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors of a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have exactly n distinct
    prime factors, and return them as a list."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1
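
# A small worked check: 14 = 2 * 7 and 15 = 3 * 5 are the first two consecutive
# integers with two distinct prime factors each, so run(2) should return [14, 15].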


def solution(n: int = 4) -> int | None:
    """Return the first of the first n consecutive integers to have n distinct
    prime factors each (Project Euler problem 47)."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class __lowercase( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any]=7 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : int=30 , _lowerCAmelCase : Optional[Any]=400 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , _lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=1 / 255 , _lowerCAmelCase : int=True , ) -> Union[str, Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_pad
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ ( self : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Dict=False ) -> Dict:
if not batched:
_lowerCAmelCase = image_inputs[0]
if isinstance(_lowerCAmelCase , Image.Image ):
_lowerCAmelCase , _lowerCAmelCase = image.size
else:
_lowerCAmelCase , _lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCAmelCase = int(self.size['shortest_edge'] * h / w )
_lowerCAmelCase = self.size['shortest_edge']
elif w > h:
_lowerCAmelCase = self.size['shortest_edge']
_lowerCAmelCase = int(self.size['shortest_edge'] * w / h )
else:
_lowerCAmelCase = self.size['shortest_edge']
_lowerCAmelCase = self.size['shortest_edge']
else:
_lowerCAmelCase = []
for image in image_inputs:
_lowerCAmelCase , _lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCAmelCase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0]
_lowerCAmelCase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
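

# Worked example for get_expected_values above: a 300 x 400 (w x h) PIL image with
# size {"shortest_edge": 18} resizes to height int(18 * 400 / 300) == 24 and width 18.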
@require_torch
@require_vision
class __lowercase( SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ConditionalDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
_lowerCAmelCase = ConditionalDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[Any]:
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _lowerCAmelCase )
_lowerCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCAmelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[int]:
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
# prepare image and target
_lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_lowerCAmelCase = json.loads(f.read() )
_lowerCAmelCase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowerCAmelCase = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
_lowerCAmelCase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , return_tensors='pt' )
# verify pixel values
_lowerCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowerCAmelCase , atol=1e-4 ) )
# verify area
_lowerCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowerCAmelCase ) )
# verify boxes
_lowerCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowerCAmelCase , atol=1e-3 ) )
# verify image_id
_lowerCAmelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowerCAmelCase ) )
# verify is_crowd
_lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowerCAmelCase ) )
# verify class_labels
_lowerCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowerCAmelCase ) )
# verify orig_size
_lowerCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowerCAmelCase ) )
# verify size
_lowerCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowerCAmelCase ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[Any]:
# prepare image, target and masks_path
_lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_lowerCAmelCase = json.loads(f.read() )
_lowerCAmelCase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowerCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowerCAmelCase = ConditionalDetrImageProcessor(format='coco_panoptic' )
_lowerCAmelCase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , masks_path=_lowerCAmelCase , return_tensors='pt' )
# verify pixel values
_lowerCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowerCAmelCase , atol=1e-4 ) )
# verify area
_lowerCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowerCAmelCase ) )
# verify boxes
_lowerCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowerCAmelCase , atol=1e-3 ) )
# verify image_id
_lowerCAmelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowerCAmelCase ) )
# verify is_crowd
_lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowerCAmelCase ) )
# verify class_labels
_lowerCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowerCAmelCase ) )
# verify masks
_lowerCAmelCase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _lowerCAmelCase )
# verify orig_size
_lowerCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowerCAmelCase ) )
# verify size
_lowerCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowerCAmelCase ) )
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=10 , lowercase_=[10, 20, 30, 40] , lowercase_=[1, 1, 2, 1] , lowercase_=True , lowercase_=True , lowercase_="relu" , lowercase_=3 , lowercase_=None , ) -> Optional[int]:
a__ =parent
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =embeddings_size
a__ =hidden_sizes
a__ =depths
a__ =is_training
a__ =use_labels
a__ =hidden_act
a__ =num_labels
a__ =scope
a__ =len(UpperCAmelCase__)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size] , self.num_labels)
a__ =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Any:
a__ =RegNetModel(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
a__ =model(UpperCAmelCase__)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Any:
a__ =self.num_labels
a__ =RegNetForImageClassification(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
a__ =model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.prepare_config_and_inputs()
a__ , a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
snake_case =(
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> str:
a__ =RegNetModelTester(self)
a__ =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__)
def __UpperCamelCase ( self) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> List[Any]:
return
@unittest.skip(reason='RegNet does not use inputs_embeds')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Optional[int]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(UpperCAmelCase__)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def __UpperCamelCase ( self) -> Any:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def __UpperCamelCase ( self) -> str:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(config=UpperCAmelCase__)
for name, module in model.named_modules():
                if isinstance(UpperCAmelCase__ , (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
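# Illustrative sketch (not part of the original test file): decoding the winning
# logit into a human-readable label outside the unittest harness. Assumes the hub
# checkpoint ships an `id2label` mapping, as the ImageNet-trained RegNet checkpoints do.
#   model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   processor = AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])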
| 20
|
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the total sequence length reaches `max_length`."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all.")
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops generation once `max_new_tokens` tokens have been generated."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once more than `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
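# Usage sketch (illustrative, not part of the original module): combining criteria
# in a StoppingCriteriaList. `scores` is ignored by these particular criteria, so
# any tensor will do:
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=5), MaxTimeCriteria(max_time=2.0)])
#   criteria(torch.ones((1, 6), dtype=torch.long), torch.zeros((1, 10)))  # True: length 6 >= 5
#   validate_stopping_criteria(criteria, max_length=5).max_length          # 5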
| 682
| 0
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """Logger adapter that only logs on the main process unless told otherwise."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.')
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None) -> MultiProcessAdapter:
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
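# Usage sketch (illustrative; the accelerate state must be initialized first, per
# the RuntimeError raised in `log` above):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed on the main process only")
#   logger.info("printed on every process", main_process_only=False)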
| 345
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')['module']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f'Saving tokenizer to {pytorch_dump_folder_path}')
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'r') as f:
        tokenizer_config = json.load(f)
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'w') as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(['#'])[0]
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop('entity_predictions.decoder.weight')
    state_dict.pop('lm_head.decoder.weight')
    state_dict.pop('lm_head.decoder.bias')
    hf_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head') or key.startswith('entity_predictions')):
            hf_state_dict[f'luke.{key}'] = state_dict[key]
        else:
            hf_state_dict[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(hf_state_dict, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}')
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f'Unexpected missing_keys: {missing_keys}')
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')
    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}')
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}')
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')
    outputs = model(**encoding)
    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>'))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:')][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f'{language}:{entity_name}'] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
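    # Example invocation (illustrative; the script and file names below are
    # placeholders, not files shipped with this snippet):
    #   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path mluke_base/pytorch_model.bin \
    #       --metadata_path mluke_base/metadata.json \
    #       --entity_vocab_path mluke_base/entity_vocab.jsonl \
    #       --pytorch_dump_folder_path ./mluke-base \
    #       --model_size base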
| 345
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
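# Worked example (illustrative) of the matching rules above:
#   should_ignore("quantizer.vq.layers.3._codebook.embed_avg",
#                 ["quantizer.vq.layers.*._codebook.embed_avg"])
# returns True: the ".*." pattern splits into prefix "quantizer.vq.layers" and
# suffix "_codebook.embed_avg", and both substrings occur in the name.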
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    # fixed: the original `== "encodec_24khz" or "encodec_32khz"` was always truthy
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}')
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed') and name.endswith('embed_avg'):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split('.')[-2]
                    mapped_key = mapped_key.replace('*', layer_index)
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}')
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
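    # Example invocation (illustrative; the checkpoint file name matches the
    # download links in the header comment, the output folder is a placeholder):
    #   python convert_encodec_checkpoint_to_pytorch.py \
    #       --model encodec_24khz \
    #       --checkpoint_path encodec_24khz-d7cc33bc.th \
    #       --pytorch_dump_folder_path ./encodec_24khz_hf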
| 42
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self) -> None:
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self) -> None:
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        output = tokenizer(input_strings)
        self.assertIsNotNone(output)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self) -> None:
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        output = tokenizer(input_strings)
        self.assertIsNotNone(output)
| 591
| 0
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
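# Usage sketch (illustrative; "CompVis/ldm-celebahq-256" is one hub repo with
# compatible vqvae/unet/scheduler components, any other such repo works too):
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")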
| 133
|
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` via the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
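# Example (illustrative): prime_sieve(25) -> [2, 3, 5, 7, 11, 13, 17, 19, 23].
# The sieve runs in the classic O(n log log n) time and O(n) space.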
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 133
| 1
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
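# Usage sketch (illustrative; the encoder/decoder model types are arbitrary examples):
#   encoder_config = AutoConfig.for_model("vit")
#   decoder_config = AutoConfig.for_model("gpt2")
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)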
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature: str = "default") -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 574
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir: str, src_lang: str, tgt_lang: str):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 574
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = 'sew-d'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) '
                f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
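# Usage sketch (illustrative): with the default conv strides the feature encoder
# downsamples audio frames by their product, 5 * 2**6 = 320, exposed by the
# property above:
#   config = SEWDConfig()
#   config.inputs_to_logits_ratio  # 320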
| 712
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class JaxFormatter(TensorFormatter[Mapping, 'jax.Array', Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f'Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` '
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.')
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f'Device with string identifier {self.device} not listed among the available '
                f'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
                f'device: {str(jax.devices()[0])}.')
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'dtype': jnp.int64}
            else:
                default_dtype = {'dtype': jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 0
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    """
    Adjacency-list graph; supports directed (default) and undirected graphs.
    """

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """
        Connects source_vertex to destination_vertex, creating either vertex on
        first sight. Returns self so calls can be chained.
        """
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of
            # adjacent vertices and add source vertex to destination vertex list
            # of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices, then create a new
            # vertex with destination vertex as key and assign a list containing
            # the source vertex as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source
            # vertex to destination vertex list of adjacent vertices, then create a
            # new vertex with source vertex as key and assign a list containing the
            # destination vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in the
            # adjacency list, create a new vertex for each, each holding the other
            # as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new
            # vertex with destination vertex as key, which has no adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing the
            # destination vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in the
            # adjacency list, create a new vertex with source vertex as key holding
            # the destination vertex, then a new vertex with destination vertex as
            # key, with no adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
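
# --- Usage sketch (added for illustration) ---
d_graph = GraphAdjacencyList[int]()                   # directed by default
d_graph.add_edge(0, 1).add_edge(1, 2).add_edge(1, 4)  # add_edge returns self, so it chains
print(d_graph)                                        # {0: [1], 1: [2, 4], 2: [], 4: []}

u_graph = GraphAdjacencyList[str](directed=False)
u_graph.add_edge("a", "b")
print(u_graph)                                        # {'a': ['b'], 'b': ['a']}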
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients are given in ascending order of power:
        coefficients[i] is the coefficient of x^i.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
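
# --- Usage sketch (added for illustration) ---
p = Polynomial(2, [1, 2, 3])   # ascending coefficients: 3x^2 + 2x + 1
q = Polynomial(1, [0, 1])      # x
print(p + q)                   # 3x^2 + 3x + 1
print(p.evaluate(2))           # 3*4 + 2*2 + 1 = 17
print(p.derivative())          # 6x + 2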
'''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using union by rank; returns True on success.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
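
# --- Usage sketch (added for illustration) ---
ds = DisjointSet([1, 1, 1])  # three singleton sets of size 1
ds.merge(0, 1)               # union by rank; returns True on success
ds.merge(1, 2)
print(ds.max_set)            # 3: every element now belongs to one set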
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3,
                 hidden_act="swish", conv_kernel_size=3, output_stride=32,
                 classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True,
                 use_labels=True, num_labels=10, scope=None, width_multiplier=0.25,
                 ffn_dropout=0.0, attn_dropout=0.0):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range, width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __magic_name__ ( self : int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __magic_name__ ( self : int ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(snake_case_ )
snake_case__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Any = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
def check_hidden_states_output(snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple ):
snake_case__ : Optional[Any] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
snake_case__ : Tuple = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : Union[str, Any] = outputs.hidden_states
snake_case__ : Any = 5
self.assertEqual(len(snake_case_ ) , snake_case_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
snake_case__ : Dict = 2
for i in range(len(snake_case_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : int = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case_ )
@slow
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Any = MobileViTVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _a ( ):
"""simple docstring"""
snake_case__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : Tuple = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
snake_case_ )
snake_case__ : Any = self.default_image_processor
snake_case__ : Tuple = prepare_img()
snake_case__ : str = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
snake_case__ : Any = model(**snake_case_ )
# verify the logits
snake_case__ : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
snake_case__ : Tuple = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : List[Any] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ : Any = model.to(snake_case_ )
snake_case__ : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ : str = prepare_img()
snake_case__ : Union[str, Any] = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**snake_case_ )
snake_case__ : Tuple = outputs.logits
# verify the logits
snake_case__ : Optional[Any] = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , snake_case_ )
snake_case__ : List[Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=snake_case_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case_ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ : List[str] = model.to(snake_case_ )
snake_case__ : Optional[int] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ : str = prepare_img()
snake_case__ : str = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
snake_case__ : Any = model(**snake_case_ )
snake_case__ : str = outputs.logits.detach().cpu()
snake_case__ : int = image_processor.post_process_semantic_segmentation(outputs=snake_case_ , target_sizes=[(5_0, 6_0)] )
snake_case__ : int = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , snake_case_ )
snake_case__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case_ )
snake_case__ : Any = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , snake_case_ )
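
# --- Usage sketch (added for illustration) ---
# Standalone version of the image-classification integration test above. Note that
# the upstream class names are MobileViTV2* ("Va" is this file's rendering of "V2").
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])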
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
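
# --- Usage sketch (added for illustration; the checkpoint id and input file are
# assumptions, any 256x256 DDPM/DDIM-style checkpoint should work) ---
from diffusers import DDPMPipeline
from PIL import Image

base = DDPMPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = DDIMNoiseComparativeAnalysisPipeline(unet=base.unet, scheduler=base.scheduler)

init_image = Image.open("face.png")  # hypothetical 256x256 RGB input image
# `strength` controls how far the input is noised before denoising resumes
images, noising_timestep = pipe(image=init_image, strength=0.5, return_dict=False)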
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
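
# --- Usage sketch (added for illustration) ---
config = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)
# attribute_map lets the canonical names resolve to the GPT-style attributes:
print(config.num_hidden_layers)  # 2   (-> n_layer)
print(config.hidden_size)        # 256 (-> n_embd)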
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
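
# --- Worked example (added for illustration) ---
# With the toy vocab/merges written in setUp above, BPE merges "l o" -> "lo",
# "lo w" -> "low" and "e r</w>" -> "er</w>", so:
#   tokenizer.tokenize("lower") == ["low", "er</w>"]
# and ids 14, 15, 20 are the positions of "low", "er</w>" and "<unk>" in the vocab.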
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = velocity / speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix for a boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the Lorentz transformation to a four-vector (ct, x, y, z)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composite numbers that violate the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # the while loop found no decomposition prime + 2*i*i
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'{solution() = }')
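
# --- Verification sketch (added for illustration) ---
# Project Euler 46: the smallest odd composite that cannot be written as
# prime + 2*k^2. The published answer is 5777.
assert solution() == 5777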
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess; `log` takes an additional
    `main_process_only` kwarg that dictates whether the call runs on every process
    or only the main one.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegates the logger call after checking if we should log."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Returns a `logging.Logger` for `name` that can handle multiprocessing."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
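
# --- Usage sketch (added for illustration) ---
from accelerate import Accelerator

accelerator = Accelerator()  # initializes the shared PartialState required by get_logger
logger = get_logger(__name__, log_level="INFO")
logger.info("printed on the main process only")
logger.info("printed by every rank, in launch order", main_process_only=False, in_order=True)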
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False,
                 use_input_mask=True, use_token_type_ids=False, use_labels=True,
                 vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
                 num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def _lowerCAmelCase ( self : str ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE =type
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def _lowerCAmelCase ( self : Any ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =EsmModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE =EsmEmbeddings(config=snake_case )
SCREAMING_SNAKE_CASE =torch.as_tensor([[12, 31, 13, model.padding_idx]] )
SCREAMING_SNAKE_CASE =torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
SCREAMING_SNAKE_CASE =create_position_ids_from_input_ids(snake_case ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case ,snake_case ) ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE =EsmEmbeddings(config=snake_case )
SCREAMING_SNAKE_CASE =torch.empty(2 ,4 ,30 )
SCREAMING_SNAKE_CASE =[
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
SCREAMING_SNAKE_CASE =torch.as_tensor([expected_single_positions, expected_single_positions] )
SCREAMING_SNAKE_CASE =embeddings.create_position_ids_from_inputs_embeds(snake_case )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case ,snake_case ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCAmelCase ( self : Dict ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCAmelCase ( self : Optional[int] ):
pass
@require_torch
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Optional[int] ):
with torch.no_grad():
SCREAMING_SNAKE_CASE =EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
SCREAMING_SNAKE_CASE =torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
SCREAMING_SNAKE_CASE =33
SCREAMING_SNAKE_CASE =torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
with torch.no_grad():
SCREAMING_SNAKE_CASE =EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
SCREAMING_SNAKE_CASE =torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE =torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,snake_case ,atol=1e-4 ) )
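
# --- Usage sketch (added for illustration; mask filling with the same checkpoint
# used in the integration tests above) ---
import torch
from transformers import AutoTokenizer, EsmForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

inputs = tokenizer("MKTAYIAKQR<mask>ISFVKSHFSRQLEERLGLIEVQ", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_pos].argmax(-1).item()
print(tokenizer.convert_ids_to_tokens(predicted_id))  # most likely residue at the mask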
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Pure implementation of the merge sort algorithm in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
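
# --- Examples (added for illustration) ---
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([]) == []
assert merge_sort([-2, -5, -45]) == [-45, -5, -2]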
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
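
# --- Worked example (added for illustration) ---
# A config docstring passes the check when it contains a markdown checkpoint link
# whose display name and URL agree:
assert _re_checkpoint.findall(
    "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]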
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
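
# --- Usage note (added for illustration) ---
# With the lazy module installed in sys.modules, the torch-backed classes are only
# materialized on first attribute access, e.g.:
#   from transformers.models.upernet import UperNetForSemanticSegmentation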
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A ( _A, _A, _A, _A, _A ):
"""simple docstring"""
# Load configuration defined in the metadata file
with open(_A ) as metadata_file:
snake_case_ :Union[str, Any] = json.load(_A )
snake_case_ :List[Any] = LukeConfig(use_entity_aware_attention=_A, **metadata["model_config"] )
# Load in the weights from the checkpoint_path
snake_case_ :List[Any] = torch.load(_A, map_location="cpu" )
# Load the entity vocab file
snake_case_ :str = load_entity_vocab(_A )
snake_case_ :List[str] = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case_ :Tuple = AddedToken("<ent>", lstrip=_A, rstrip=_A )
snake_case_ :int = AddedToken("<ent2>", lstrip=_A, rstrip=_A )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(_A )
with open(os.path.join(_A, LukeTokenizer.vocab_files_names["entity_vocab_file"] ), "w" ) as f:
json.dump(_A, _A )
snake_case_ :Tuple = LukeTokenizer.from_pretrained(_A )
# Initialize the embeddings of the special tokens
snake_case_ :Dict = state_dict["embeddings.word_embeddings.weight"]
snake_case_ :Optional[Any] = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
snake_case_ :Tuple = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
snake_case_ :Any = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'''encoder.layer.{layer_index}.attention.self.'''
            # copy the query weights/biases into the three entity-aware attention
            # branches (key names as in the original LUKE conversion script)
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict, strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'''Missing keys {', '.join(missing_keys )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification" )
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt" )
    outputs = model(**encoding )
# Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1_024) )
        expected_slice = torch.tensor(
            [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4 ):
        raise ValueError
# Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1_024) )
        expected_slice = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4 ):
        raise ValueError
# Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab(entity_vocab_path ):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
            title , _count = line.rstrip().split("\t" )
            entity_vocab[title] = index
    return entity_vocab
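# `load_entity_vocab` expects one tab-separated `<entity>\t<count>` pair per
# line and maps each entity to its line index. A tiny worked example with an
# in-memory file (the entity names are made up for illustration):
import io

def _demo_entity_vocab() -> dict:
    fake_tsv = io.StringIO("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n[MASK2]\t0\n")
    vocab = {}
    for index, line in enumerate(fake_tsv):
        title, _count = line.rstrip().split("\t")
        vocab[title] = index
    return vocab  # {'[PAD]': 0, '[UNK]': 1, '[MASK]': 2, '[MASK2]': 3}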
if __name__ == "__main__":
__UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig( PretrainedConfig ):
    model_type = '''dpt'''
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-12 , A_=384 , A_=16 , A_=3 , A_=False , A_=True , A_=[2, 5, 8, 11] , A_="project" , A_=[4, 2, 1, 0.5] , A_=[96, 192, 384, 768] , A_=256 , A_=-1 , A_=False , A_=True , A_=0.4 , A_=255 , A_=0.1 , A_=[1, 1024, 24, 24] , A_=[0, 1] , A_=None , **A_ , ) -> int:
"""simple docstring"""
super().__init__(**A_ )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict( self ) -> dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
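# The serialization pattern of `to_dict` above, reduced to a runnable sketch
# (toy class; only the deep-copy / nested-config / model_type steps are kept):
class _TinyConfig:
    model_type = "tiny"

    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size
        self.backbone_config = None

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

# _TinyConfig().to_dict() -> {'hidden_size': 768, 'backbone_config': None, 'model_type': 'tiny'}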
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'vinai/phobert-base': 2_5_6,
'vinai/phobert-large': 2_5_6,
}
def get_pairs(word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class PhobertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ) -> dict:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> str:
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ) -> None:
        """simple docstring"""
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
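# The `bpe` method applies the lowest-ranked merge repeatedly, then joins
# sub-units with '@@ ' and drops the trailing '</w>'. A standalone sketch on a
# toy merge table (the ranks are made up; same algorithm as above):
def _toy_bpe(word, bpe_ranks):
    parts = tuple(word[:-1]) + (word[-1] + "</w>",)
    while len(parts) > 1:
        pairs = [(parts[i], parts[i + 1]) for i in range(len(parts) - 1)]
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        merged, i = [], 0
        while i < len(parts):
            if i < len(parts) - 1 and (parts[i], parts[i + 1]) == bigram:
                merged.append(parts[i] + parts[i + 1])
                i += 2
            else:
                merged.append(parts[i])
                i += 1
        parts = tuple(merged)
    return "@@ ".join(parts)[:-4]  # strip the '</w>' end-of-word marker

# _toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) -> 'low@@ e@@ r'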
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
LABEL_DIR = ''
IMAGE_DIR = ''
OUTPUT_DIR = ''
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths , annos = get_dataset(LABEL_DIR , IMAGE_DIR )
    print("""Processing...""" )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        file_root = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(F"""{file_root}.jpg""" , image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"""Success {index+1}/{len(new_images )} with {file_name}""" )
        annos_list = []
        for anno in new_annos[index]:
            obj = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(obj )
        with open(F"""{file_root}.txt""" , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset(label_dir , img_dir ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"""{label_name}.jpg""" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(img_list , anno_list , flip_type = 1 ) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char = 32 ) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
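# For YOLO-format annotations (class, x_center, y_center, width, height, all
# normalized to [0, 1]), a horizontal flip only requires x_center -> 1 - x_center,
# exactly as in `update_image_and_anno`. A tiny worked example without image I/O:
def _flip_bbox_horizontal(bbox: list) -> list:
    cls_id, x_c, y_c, w, h = bbox
    return [cls_id, 1 - x_c, y_c, w, h]

# _flip_bbox_horizontal([0, 0.25, 0.5, 0.1, 0.2]) -> [0, 0.75, 0.5, 0.1, 0.2]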
if __name__ == "__main__":
main()
print('DONE ✅')
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
    def __init__( self , parent , out_indices=None , out_features=None , stage_names=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> None:
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ):
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest (ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = (TimmBackbone,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : str = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : List[str] = False
    def setUp( self ) -> None:
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def __snake_case ( self :Optional[Any] ) ->List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self :Union[str, Any] ) ->Optional[Any]:
lowercase : Dict = """resnet18"""
lowercase : int = """microsoft/resnet-18"""
lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ )
lowercase : int = AutoBackbone.from_pretrained(__magic_name__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowercase : Tuple = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ , out_indices=[1, 2, 3] )
lowercase : Any = AutoBackbone.from_pretrained(__magic_name__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __snake_case ( self :Tuple ) ->Optional[int]:
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __snake_case ( self :Optional[Any] ) ->int:
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __snake_case ( self :Optional[Any] ) ->Optional[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __snake_case ( self :int ) ->List[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __snake_case ( self :int ) ->Tuple:
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __snake_case ( self :List[Any] ) ->List[str]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :int ) ->Any:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __snake_case ( self :int ) ->int:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __snake_case ( self :str ) ->Union[str, Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :int ) ->Optional[int]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :List[Any] ) ->Optional[Any]:
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __snake_case ( self :Union[str, Any] ) ->List[Any]:
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __snake_case ( self :Tuple ) ->List[Any]:
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __snake_case ( self :List[Any] ) ->int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case ( self :Optional[Any] ) ->Optional[int]:
pass
def __snake_case ( self :Union[str, Any] ) ->Union[str, Any]:
lowercase , lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : str = model_class(__magic_name__ )
lowercase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Any = [*signature.parameters.keys()]
lowercase : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __snake_case ( self :Any ) ->List[str]:
lowercase , lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Any = True
lowercase : int = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowercase : Union[str, Any] = self.all_model_classes[0]
lowercase : Tuple = model_class(__magic_name__ )
model.to(__magic_name__ )
lowercase : Optional[Any] = self._prepare_for_class(__magic_name__ , __magic_name__ )
lowercase : Dict = model(**__magic_name__ )
lowercase : List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
lowercase : str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowercase : List[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__magic_name__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __snake_case ( self :Any ) ->List[Any]:
lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowercase : Any = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowercase : List[Any] = copy.deepcopy(__magic_name__ )
lowercase : Dict = None
lowercase : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowercase : Optional[Any] = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowercase : str = copy.deepcopy(__magic_name__ )
lowercase : int = False
lowercase : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowercase : Dict = model(**__magic_name__ )
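# The timm-vs-transformers comparison above boils down to loading the same
# architecture through both backends. A usage sketch (left as comments since it
# requires network access and the `timm` package; names taken from the test):
# from transformers import AutoBackbone
# timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
# hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18")
# assert timm_backbone.channels == hf_backbone.channels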
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__lowerCamelCase : List[Any] = logging.getLogger(__name__)
__lowerCamelCase : Union[str, Any] = """Hello world! cécé herlolip"""
__lowerCamelCase : Optional[Any] = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints(path , pytorch_dump_folder_path ):
    """simple docstring"""
lowerCamelCase_ : Dict = BertAbsConfig(
temp_dir='''.''' , finetune_bert=SCREAMING_SNAKE_CASE_ , large=SCREAMING_SNAKE_CASE_ , share_emb=SCREAMING_SNAKE_CASE_ , use_bert_emb=SCREAMING_SNAKE_CASE_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    lowerCamelCase_ : Optional[int] = torch.load(path , lambda storage , loc : storage )
lowerCamelCase_ : int = AbsSummarizer(SCREAMING_SNAKE_CASE_ , torch.device('''cpu''' ) , SCREAMING_SNAKE_CASE_ )
original.eval()
lowerCamelCase_ : str = BertAbsSummarizer(SCREAMING_SNAKE_CASE_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
    encoder_input_ids = tokenizer.encode('''This is sample éàalj\'-.''' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowerCamelCase_ : List[str] = encoder_input_ids
lowerCamelCase_ : Optional[Any] = decoder_input_ids
lowerCamelCase_ : Any = None
lowerCamelCase_ : Optional[int] = None
lowerCamelCase_ : Optional[Any] = None
lowerCamelCase_ : Union[str, Any] = None
lowerCamelCase_ : int = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowerCamelCase_ : List[Any] = original(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ : Optional[Any] = original.generator(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ : Dict = new_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ : str = new_model.generator(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ : Any = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('''Maximum absolute difference beween weights: {:.2f}'''.format(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ : List[Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('''Maximum absolute difference beween weights: {:.2f}'''.format(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ : Any = torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
__lowerCamelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
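# Both the encoder and decoder inputs above are right-padded to 512 tokens with
# the tokenizer's pad id. The padding step as a standalone sketch:
def _pad_to_length(token_ids, pad_token_id, max_len=512):
    """Right-pad a list of token ids to `max_len`, as done for the model inputs above."""
    return token_ids + [pad_token_id] * (max_len - len(token_ids))

# len(_pad_to_length([101, 2023, 102], pad_token_id=0)) -> 512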
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester( unittest.TestCase ):
    def setUp( self ) -> None:
        """simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
    def tearDown( self ) -> None:
        """simple docstring"""
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ) -> None:
        """simple docstring"""
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ) -> None:
        """simple docstring"""
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency( self ) -> None:
        """simple docstring"""
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''DDPM''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
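# `black.format_str` is the programmatic entry point `check_copy_consistency`
# relies on; a minimal standalone call with the same Mode arguments:
def _demo_black_format() -> str:
    return black.format_str("x=1", mode=black.Mode(line_length=119))  # -> 'x = 1\n'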
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase : List[Any] = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor (DeformableDetrImageProcessor ):
    '''simple docstring'''
    def __init__( self ,*args ,**kwargs ) -> None:
        warnings.warn(
            """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use DeformableDetrImageProcessor instead.""" ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
'''simple docstring'''
from __future__ import annotations
class XORCipher :
    '''simple docstring'''
    def __init__( self ,key = 0 ) -> None:
        self.__key = key
    def encrypt( self ,content ,key ) -> list[str]:
        assert isinstance(key ,int ) and isinstance(content ,str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self ,content ,key ) -> list[str]:
        assert isinstance(key ,int ) and isinstance(content ,str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self ,content ,key = 0 ) -> str:
        assert isinstance(key ,int ) and isinstance(content ,str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self ,content ,key = 0 ) -> str:
        assert isinstance(key ,int ) and isinstance(content ,str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self ,file ,key = 0 ) -> bool:
        assert isinstance(file ,str ) and isinstance(key ,int )
        try:
            with open(file ) as fin, open("""encrypt.out""" ,"""w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line ,key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self ,file ,key ) -> bool:
        assert isinstance(file ,str ) and isinstance(key ,int )
        try:
            with open(file ) as fin, open("""decrypt.out""" ,"""w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line ,key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
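# XOR with the same key is its own inverse, which is why encrypt and decrypt
# above share one implementation. A standalone round-trip check (hypothetical
# helper, independent of the class):
def _xor_string(content: str, key: int) -> str:
    key %= 255  # keep the key in byte range, mirroring the class above
    return "".join(chr(ord(ch) ^ key) for ch in content)

# _xor_string(_xor_string("hallo welt", 67), 67) -> 'hallo welt'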
from __future__ import annotations
def median_of_two_arrays(numsa , numsb ) -> float:
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
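# Worked examples of the logic above:
# median_of_two_arrays([1.0, 3.0], [2.0])       -> 2.0  (odd total length: middle element)
# median_of_two_arrays([1.0, 3.0], [2.0, 4.0])  -> 2.5  (even: mean of the two middle values)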
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("""Enter the elements of first array: """).split()]
    array_b = [float(x) for x in input("""Enter the elements of second array: """).split()]
    print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations( unittest.TestCase ):
    def test_gelu_versions( self ) -> None:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10( self ) -> None:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        gelu10 = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
    def test_get_activation( self ) -> None:
        get_activation('gelu' )
        get_activation('gelu_10' )
        get_activation('gelu_fast' )
        get_activation('gelu_new' )
        get_activation('gelu_python' )
        get_activation('gelu_pytorch_tanh' )
        get_activation('linear' )
        get_activation('mish' )
        get_activation('quick_gelu' )
        get_activation('relu' )
        get_activation('sigmoid' )
        get_activation('silu' )
        get_activation('swish' )
        get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects( self ) -> None:
        act1 = get_activation('gelu' )
        act1.a = 1
        act2 = get_activation('gelu' )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
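# `gelu_10` behaves like GELU but caps activations at 10.0, which the masked
# comparison above verifies. A minimal numeric sketch of the clipping idea
# (hypothetical helper, not the transformers implementation):
import math

def _gelu_then_clip(x: float, ceiling: float = 10.0) -> float:
    gelu = 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))
    return min(gelu, ceiling)

# _gelu_then_clip(100.0) -> 10.0 ; _gelu_then_clip(1.0) -> ~0.8413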
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def lowercase__ ( __lowercase : Optional[int] , __lowercase : str ) -> str:
"""simple docstring"""
__UpperCamelCase = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
__UpperCamelCase = DatasetInfosDict.from_directory(__lowercase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def lowercase__ ( __lowercase : Optional[int] , __lowercase : DatasetInfo ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = str(__lowercase )
dataset_info.write_to_directory(__lowercase )
__UpperCamelCase = DatasetInfo.from_directory(__lowercase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__lowercase , 'dataset_info.json' ) )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
__UpperCamelCase = dataset_info._to_yaml_dict()
assert sorted(__lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
__UpperCamelCase = yaml.safe_dump(__lowercase )
__UpperCamelCase = yaml.safe_load(__lowercase )
assert dataset_info_yaml_dict == reloaded
def lowercase__ ( ) -> str:
"""simple docstring"""
__UpperCamelCase = DatasetInfo()
__UpperCamelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def lowercase__ ( __lowercase : int , __lowercase : DatasetInfosDict ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = str(__lowercase )
dataset_infos_dict.write_to_directory(__lowercase )
__UpperCamelCase = DatasetInfosDict.from_directory(__lowercase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
__UpperCamelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
__UpperCamelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__lowercase , 'README.md' ) )
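# The YAML round-trip exercised above, reduced to a standalone sketch with
# plain PyYAML (field names taken from the tests):
def _yaml_roundtrip(info: dict) -> bool:
    """Serialize a dict to YAML and parse it back; True when lossless."""
    return yaml.safe_load(yaml.safe_dump(info)) == info

# _yaml_roundtrip({"dataset_size": 42, "config_name": "config"}) -> True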
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_rembert'''] = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_rembert_fast'''] = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_rembert'''] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_rembert'''] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
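# Under TYPE_CHECKING the real symbols are imported only for static analysis;
# at runtime `_LazyModule` resolves them on first attribute access. A
# self-contained sketch of the TYPE_CHECKING pattern itself:
from typing import TYPE_CHECKING as _TYPE_CHECKING

if _TYPE_CHECKING:
    from decimal import Decimal  # visible to type checkers, never imported at runtime

def _double(x: "Decimal") -> "Decimal":
    return x * 2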
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A (__magic_name__ ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2 , UpperCamelCase_=99 , UpperCamelCase_=0 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_="last" , UpperCamelCase_=None , UpperCamelCase_=None , ):
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : Any = use_input_lengths
__UpperCAmelCase : Optional[Any] = use_token_type_ids
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Union[str, Any] = gelu_activation
__UpperCAmelCase : Tuple = sinusoidal_embeddings
__UpperCAmelCase : str = causal
__UpperCAmelCase : Union[str, Any] = asm
__UpperCAmelCase : Optional[int] = n_langs
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = n_special
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : str = type_vocab_size
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : Union[str, Any] = num_choices
__UpperCAmelCase : Union[str, Any] = summary_type
__UpperCAmelCase : Dict = use_proj
__UpperCAmelCase : Dict = scope
def _snake_case ( self ):
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_lengths:
__UpperCAmelCase : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCAmelCase : str = None
__UpperCAmelCase : int = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , 2 ).float()
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
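
# A minimal way to exercise this suite directly; the path assumes the standard
# transformers repository layout and is illustrative only:
#
#     python -m pytest tests/models/flaubert/test_modeling_flaubert.py -k "flaubert"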
| 10
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
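
# Usage sketch: the `datasets` library selects this formatter when a dataset is
# switched to the "torch" format; the data below is illustrative only.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#     ds[0]["x"]  # tensor([1, 2])                 <- format_row -> recursive_tensorize
#     ds["x"]     # tensor([[1, 2], [3, 4]])       <- format_column, stacked by _consolidate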
| 10
| 1
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
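
# Outside the test harness, this scheduler is typically swapped into a diffusers
# pipeline as below; the model id is illustrative only.
#
#     from diffusers import DiffusionPipeline, EulerDiscreteScheduler
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)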
| 26
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` has its QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
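
# Example invocation; the script name and paths are placeholders:
#
#     python convert_opt_original_checkpoint_to_pytorch.py \
#         --fairseq_path ./opt_checkpoint/model.pt \
#         --pytorch_dump_folder_path ./opt-pytorch \
#         --hf_config facebook/opt-350m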
| 639
| 0
|
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
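

# A quick self-check of the encode/decode round trip; the key and message are
# illustrative only. Decoding recovers the *prepared* plaintext (upper-cased,
# X-padded), not the raw input.
if __name__ == "__main__":
    key = "MONARCHY"
    message = "HIDE THE GOLD"
    encoded = encode(message, key)
    print(f"Encoded: {encoded}")
    assert decode(encoded, key) == prepare_input(message)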
| 708
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
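
# Minimal sketch of the API exercised above; the directory name is illustrative:
#
#     from transformers import GenerationConfig
#     gen_config = GenerationConfig(do_sample=True, temperature=0.7, max_new_tokens=64)
#     gen_config.save_pretrained("./my-generation-config")
#     reloaded = GenerationConfig.from_pretrained("./my-generation-config")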
| 598
| 0
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 74
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Given any two of voltage, current, and power (with the unknown passed as 0),
    compute the third using P = V * I.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
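
# Example results (values are illustrative):
#     electric_power(voltage=0, current=2, power=5)  # result(name='voltage', value=2.5)
#     electric_power(voltage=2, current=0, power=4)  # result(name='current', value=2.0)
#     electric_power(voltage=2, current=2, power=0)  # result(name='power', value=4.0)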
| 278
| 0
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
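
# Example invocation; the script name and checkpoint path are placeholders:
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc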
| 703
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowercase : Optional[int] = random.Random()
def __a ( A__ , A__=1.0 , A__=None , A__=None ) -> Any:
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int=7 , SCREAMING_SNAKE_CASE : Optional[Any]=4_0_0 , SCREAMING_SNAKE_CASE : Optional[Any]=2_0_0_0 , SCREAMING_SNAKE_CASE : Union[str, Any]=1 , SCREAMING_SNAKE_CASE : int=0.0 , SCREAMING_SNAKE_CASE : Optional[int]=1_6_0_0_0 , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Optional[Any]=8_0 , SCREAMING_SNAKE_CASE : int=1_6 , SCREAMING_SNAKE_CASE : Any=6_4 , SCREAMING_SNAKE_CASE : List[Any]="hann_window" , SCREAMING_SNAKE_CASE : Dict=8_0 , SCREAMING_SNAKE_CASE : Any=7_6_0_0 , SCREAMING_SNAKE_CASE : Optional[Any]=1E-10 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , ) -> Any:
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = do_normalize
lowerCAmelCase = num_mel_bins
lowerCAmelCase = hop_length
lowerCAmelCase = win_length
lowerCAmelCase = win_function
lowerCAmelCase = fmin
lowerCAmelCase = fmax
lowerCAmelCase = mel_floor
lowerCAmelCase = return_attention_mask
def __A ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __A ( self : List[str] , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> str:
"""simple docstring"""
def _flatten(SCREAMING_SNAKE_CASE : List[Any] ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
def __A ( self : List[str] , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Optional[int]=False ) -> str:
"""simple docstring"""
if equal_length:
lowerCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
class _lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = SpeechTaFeatureExtractor
def __A ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase = SpeechTaFeatureExtractionTester(self )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1E-3 ) )
def __A ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
lowerCAmelCase = feat_extract(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def __A ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
lowerCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = feat_extract(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __A ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
lowerCAmelCase = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
lowerCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = feat_extract(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __A ( self : str ) -> Any:
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=1_0_0_0 , padding="max_length" , return_tensors="np" )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __A ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=1_0_0_0 , padding="longest" , return_tensors="np" )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=2_0_0_0 , padding="longest" , return_tensors="np" )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def __A ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase = feature_extractor(audio_target=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(SCREAMING_SNAKE_CASE )
lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def __A ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE )
lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __A ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __A ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase = feat_extract.num_mel_bins # hack!
lowerCAmelCase = feat_extract.pad(SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="np" )[input_name]
lowerCAmelCase = feat_extract.pad(SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __A ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = self.feat_extract_dict
lowerCAmelCase = True
lowerCAmelCase = self.feature_extraction_class(**SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase = [len(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
lowerCAmelCase = feat_extract.model_input_names[0]
lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase = feat_extract.num_mel_bins # hack!
lowerCAmelCase = feat_extract.pad(SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , SCREAMING_SNAKE_CASE )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , SCREAMING_SNAKE_CASE )
    def test_attention_mask_with_truncation_target(self):
        """simple docstring"""
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        """simple docstring"""
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
"""simple docstring"""
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.38_04E-03, 2.07_52E-03, 1.98_36E-03, 2.10_57E-03, 1.61_74E-03,
3.05_18E-04, 9.15_53E-05, 3.35_69E-04, 9.76_56E-04, 1.83_11E-03,
2.01_42E-03, 2.10_57E-03, 1.73_95E-03, 4.57_76E-04, -3.96_73E-04,
4.57_76E-04, 1.00_71E-03, 9.15_53E-05, 4.88_28E-04, 1.15_97E-03,
7.32_42E-04, 9.46_04E-04, 1.80_05E-03, 1.83_11E-03, 8.85_01E-04,
4.27_25E-04, 4.88_28E-04, 7.32_42E-04, 1.09_86E-03, 2.10_57E-03] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
"""simple docstring"""
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
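# Note on the two integration tests above: SpeechTaFeatureExtractor is dual-purpose.
# Called on raw audio it returns padded waveforms of shape (batch, samples); called
# with `audio_target=` it returns log-mel spectrograms of shape (batch, frames, num_mel_bins).
# Illustrative sketch (`raw_speech` is a placeholder for a list of float arrays):
#
#     feature_extractor = SpeechTaFeatureExtractor()
#     waveforms = feature_extractor(raw_speech, return_tensors="pt").input_values
#     mels = feature_extractor(audio_target=raw_speech, return_tensors="pt").input_values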
| 159
| 0
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
"""simple docstring"""
super().setUp()
lowerCAmelCase : Tuple = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
lowerCAmelCase : int = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase : List[str] = Path(self.tmpdirname )
save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES["target_spm"] )
lowerCAmelCase : Union[str, Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        """simple docstring"""
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def test_tokenizer_equivalence_en_de(self):
        """simple docstring"""
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        """simple docstring"""
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1_000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        """simple docstring"""
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {"input_ids": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
    def test_tokenizer_integration_seperate_vocabs(self):
        """simple docstring"""
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
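# Sketch of the source/target round trip the last test exercises (the model id comes
# from the test itself; the outputs shown are what the assertions above expect):
#
#     tok = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
#     src_ids = tok("Tämä on testi").input_ids               # encoded with the source spm
#     tgt_ids = tok(text_target="This is a test").input_ids  # encoded with the target spm
#     tok.decode(tgt_ids, skip_special_tokens=True)          # -> "This is a test"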
| 645
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    """simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """simple docstring"""
        score = f1_score(
            predictions, references, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 645
| 1
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    '''simple docstring'''
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors='pt').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
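# Quick sanity sketch of pack_examples' greedy merging, using a whitespace-count
# stub in place of a real tokenizer (the stub only mimics the
# `tok(...).input_ids.shape[1]` access used in is_too_big above):
#
#     class _TokStub:
#         def __call__(self, text, return_tensors='pt'):
#             import torch
#             class _Out:
#                 input_ids = torch.zeros(1, len(text.split()))
#             return _Out()
#
#     src, tgt = ["a b", "c d", "e f g h"], ["1 2", "3 4", "5 6 7 8"]
#     pack_examples(_TokStub(), src, tgt, max_tokens=4)
#     # -> (["a b c d", "e f g h"], ["1 2 3 4", "5 6 7 8"]): short neighbours are
#     #    merged until adding the next example would exceed max_tokens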
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    '''simple docstring'''
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open('w').write('\n'.join(packed_src))
        Path(save_path / f"{split}.target").open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
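# Example invocation (paths and model name are placeholders):
#
#     python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 256 \
#         --data_dir ./cnn_dm --save_path ./cnn_dm_packed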
| 133
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png'
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png'
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
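# The replicate/shard calls above implement the standard pmap data layout: parameters
# are copied to every device while batch arrays are split across devices. Minimal
# self-contained sketch, independent of the pipelines above:
#
#     import jax
#     import jax.numpy as jnp
#     from flax.jax_utils import replicate
#     from flax.training.common_utils import shard
#
#     params = {"w": jnp.ones((2, 2))}
#     batch = jnp.ones((jax.device_count() * 4, 2))
#     p_params = replicate(params)  # adds a leading device axis to every leaf
#     p_batch = shard(batch)        # reshapes to (devices, batch_per_device, ...)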
| 133
| 1
|
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(lowercase , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
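# For intuition, a self-contained re-implementation of the contiguous-split rule the
# parametrizations above encode (a sketch, not the actual `datasets` internals):
def _example_distribute(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    if num_jobs == 0:
        return []
    base, extra = divmod(num_shards, num_jobs)  # remainder goes to the first jobs
    out, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)
        out.append(range(start, start + size))
        start += size
    return out


assert _example_distribute(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]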
| 689
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["onnx"])
| 689
| 1
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
__A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__A = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = False
__A = True
__A = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_classification_head(self):
        """simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
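# Sketch of the inference flow the integration test above exercises (model id and
# input ids are taken directly from the test):
#
#     model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#     logits = model(torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]]))[0]
#     prediction = logits.argmax(-1).item()  # index into the three MNLI classes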
| 703
|
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'bertabs'

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
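# Instantiation sketch: every argument falls back to the defaults above, so a
# minimal config only needs the values you want to override:
#
#     config = BertAbsConfig(vocab_size=30522, max_pos=512)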
| 82
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")

    def encode(self, text, labels):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f'''This example is {label}''' for label in labels], return_tensors="""pt""", padding="""max_length""", )

    def decode(self, outputs):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
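# Usage sketch (PipelineTool instances are callable; the labels below are arbitrary
# examples, and the expected output is illustrative):
#
#     classifier = TextClassificationTool()
#     classifier("This is a super nice API!", labels=["positive", "negative"])
#     # -> "positive"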
| 6
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
a_ : Optional[int] = False
@skip_mps
class __UpperCamelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
_lowercase : List[Any] = StableDiffusionAttendAndExcitePipeline
_lowercase : str = False
_lowercase : List[str] = TEXT_TO_IMAGE_PARAMS
_lowercase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
_lowercase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowercase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
def _UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
a__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE , )
a__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
a__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
a__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
a__ = CLIPTextModel(SCREAMING_SNAKE_CASE )
a__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
a__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Dict:
if str(SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a__ = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
a__ = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
a__ = a__ = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def _UpperCAmelCase ( self ) -> str:
a__ = '''cpu'''
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
a__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
a__ = pipe(**SCREAMING_SNAKE_CASE ).images
a__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
a__ = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
a__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1e-3 )
def _UpperCAmelCase ( self ) -> str:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def _UpperCAmelCase ( self ) -> List[Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _UpperCAmelCase ( self ) -> List[Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def _UpperCAmelCase ( self ) -> int:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def _UpperCAmelCase ( self ) -> Dict:
super().test_save_load_local(expected_max_difference=5e-4 )
def _UpperCAmelCase ( self ) -> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> str:
a__ = torch.manual_seed(5_1 )
        a__ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , safety_checker=None , torch_dtype=torch.float16 )
pipe.to('''cuda''' )
a__ = '''a painting of an elephant with glasses'''
a__ = [5, 7]
a__ = pipe(
prompt=SCREAMING_SNAKE_CASE , token_indices=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
a__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-1
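# In the pipeline tested above, `token_indices` marks which prompt tokens get their
# cross-attention maps "excited" during the `max_iter_to_alter` update steps. Call
# sketch (prompt and indices come from the slow test):
#
#     pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#     image = pipe(prompt="a painting of an elephant with glasses", token_indices=[5, 7]).images[0]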
| 194
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__a : List[Any] = logging.get_logger(__name__)
__a : Any = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = """layoutlmv3"""
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
"""simple docstring"""
return 1e-5
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return 12
    def generate_dummy_inputs(self, processor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40) -> Mapping[str, Any]:
        """simple docstring"""
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
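# Export sketch showing how the OnnxConfig above is typically consumed (the
# LayoutLMv3OnnxConfig name is assumed for the second class in this file, and the
# model/processor ids are illustrative):
#
#     from transformers import AutoProcessor, LayoutLMv3Model
#     model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
#     processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#     onnx_config = LayoutLMv3OnnxConfig(model.config, task="question-answering")
#     dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework="pt")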
| 705
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase = FalconModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
UpperCamelCase = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = FalconModel(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
UpperCamelCase = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = FalconForCausalLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = FalconForCausalLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
UpperCamelCase = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
UpperCamelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )["hidden_states"][0]
UpperCamelCase = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )["hidden_states"][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
        (
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
            UpperCamelCase,
        ) = config_and_inputs
UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
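# --- Added illustration (not part of the original test file) ----------------
# The `ids_tensor` / `random_attention_mask` helpers used above typically
# reduce to something this small; a sketch under that assumption (name and
# signature are hypothetical):
def _random_ids(shape, vocab_size):
    import torch
    # Uniform random token ids in [0, vocab_size), as a long tensor.
    return torch.randint(0, vocab_size, shape, dtype=torch.long)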
@require_torch
class __UpperCAmelCase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowercase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase = (FalconForCausalLM,) if is_torch_available() else ()
lowercase = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = FalconModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase , *UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
UpperCamelCase = alibi
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = input_dict["input_ids"]
UpperCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = FalconForSequenceClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = "single_label_classification"
UpperCamelCase = input_dict["input_ids"]
UpperCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = FalconForSequenceClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = input_dict["input_ids"]
UpperCamelCase = FalconForCausalLM(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE )
UpperCamelCase = input_ids.shape[0]
UpperCamelCase = model._convert_to_rw_cache(result.past_key_values )
UpperCamelCase = model._convert_cache_to_standard_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for layer in range(len(SCREAMING_SNAKE_CASE ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = "multi_label_classification"
UpperCamelCase = input_dict["input_ids"]
UpperCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase = FalconForSequenceClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(SCREAMING_SNAKE_CASE , "use_cache" ):
return
UpperCamelCase = model_class(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
if "use_cache" not in inputs:
UpperCamelCase = True
UpperCamelCase = model(**SCREAMING_SNAKE_CASE )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
UpperCamelCase = (
getattr(SCREAMING_SNAKE_CASE , "decoder_layers" , SCREAMING_SNAKE_CASE )
or getattr(SCREAMING_SNAKE_CASE , "num_decoder_layers" , SCREAMING_SNAKE_CASE )
or config.num_hidden_layers
)
UpperCamelCase = getattr(SCREAMING_SNAKE_CASE , "num_kv_heads" , config.num_attention_heads )
UpperCamelCase = getattr(SCREAMING_SNAKE_CASE , "d_model" , config.hidden_size )
UpperCamelCase = embed_dim // num_attention_heads
UpperCamelCase = outputs["past_key_values"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase = inputs["input_ids"].shape
for i in range(SCREAMING_SNAKE_CASE ):
if config.new_decoder_architecture:
UpperCamelCase = config.num_attention_heads
elif config.multi_query:
UpperCamelCase = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
UpperCamelCase = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer("My favorite food is" , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
UpperCamelCase = model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=19 )
UpperCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )[0]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase = FalconForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE )
model.eval()
model.to(SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer("My favorite food is" , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=4 )
model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=4 )
model.generate(**SCREAMING_SNAKE_CASE , num_beams=2 , max_new_tokens=4 )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase = FalconForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE )
model.eval()
model.to(device=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer("My favorite food is" , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# Test results are the same with and without cache
UpperCamelCase = model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=20 , use_cache=SCREAMING_SNAKE_CASE )
UpperCamelCase = model.generate(**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , max_new_tokens=20 , use_cache=SCREAMING_SNAKE_CASE )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
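# --- Added illustration (not part of the original tests) --------------------
# The with/without-cache equivalence check above as a standalone helper;
# assumes `model` is any causal LM with a `generate` method and `inputs` is a
# tokenized batch (both hypothetical here).
def _greedy_outputs_match(model, inputs, max_new_tokens=20):
    with_cache = model.generate(**inputs, do_sample=False, max_new_tokens=max_new_tokens, use_cache=True)
    without_cache = model.generate(**inputs, do_sample=False, max_new_tokens=max_new_tokens, use_cache=False)
    # Greedy decoding must produce identical token ids either way.
    return (with_cache - without_cache).abs().sum().item() == 0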
| 414
| 0
|
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class a ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : List[str]=None ) -> Tuple:
'''simple docstring'''
super().__init__(
lowerCAmelCase , question_encoder_tokenizer=lowerCAmelCase , generator_tokenizer=lowerCAmelCase , index=lowerCAmelCase , init_retrieval=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: str =None
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
SCREAMING_SNAKE_CASE_: Optional[Any] =self._infer_socket_ifname()
# avoid clash with the NCCL port
            SCREAMING_SNAKE_CASE_: List[Any] =str(lowerCAmelCase + 1 )
SCREAMING_SNAKE_CASE_: List[Any] =dist.new_group(ranks=lowerCAmelCase , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowerCamelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]=torch.floataa ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =torch.empty(lowerCAmelCase , dtype=lowerCAmelCase )
dist.scatter(lowerCAmelCase , src=0 , scatter_list=lowerCAmelCase , group=self.process_group )
return target_tensor
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
        SCREAMING_SNAKE_CASE_: Optional[int] =next((addr for addr in addrs if addr.startswith("""e""" )) , None )
return ifname
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : int ) -> Tuple[np.ndarray, List[dict]]:
'''simple docstring'''
if not dist.is_initialized():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self._main_retrieve(lowerCAmelCase , lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCAmelCase )
# distributed training
SCREAMING_SNAKE_CASE_: List[str] =dist.get_world_size(group=self.process_group )
# gather logic
SCREAMING_SNAKE_CASE_: Dict =None
if self._is_main():
SCREAMING_SNAKE_CASE_: Optional[Any] =[torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(lowerCAmelCase )]
dist.gather(torch.tensor(lowerCAmelCase ) , dst=0 , gather_list=lowerCAmelCase , group=self.process_group )
# scatter logic
SCREAMING_SNAKE_CASE_: int =question_hidden_states.shape[0]
SCREAMING_SNAKE_CASE_: int =[]
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
if self._is_main():
assert len(lowerCAmelCase ) == world_size
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self._main_retrieve(torch.cat(lowerCAmelCase ).numpy() , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =torch.tensor(lowerCAmelCase ), torch.tensor(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self._chunk_tensor(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self._chunk_tensor(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =self._scattered(lowerCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
SCREAMING_SNAKE_CASE_: int =self._scattered(lowerCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCAmelCase )
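# --- Added illustration (not part of the original retriever) ----------------
# The scatter step above in isolation; assumes torch.distributed has already
# been initialized (gloo backend) and that `chunks` is a list of per-rank
# tensors on the source rank (None everywhere else).
def _scatter_from_main(shape, chunks=None, group=None):
    target = torch.empty(shape, dtype=torch.float32)
    # Rank 0 supplies scatter_list; every rank receives its slice into `target`.
    dist.scatter(target, src=0, scatter_list=chunks, group=group)
    return target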
| 409
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
SCREAMING_SNAKE_CASE_: List[Any] =128
elif "12-12" in model_name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =12
SCREAMING_SNAKE_CASE_: Optional[Any] =12
elif "14-14" in model_name:
SCREAMING_SNAKE_CASE_: List[Any] =14
SCREAMING_SNAKE_CASE_: Any =14
elif "16-16" in model_name:
SCREAMING_SNAKE_CASE_: Dict =16
SCREAMING_SNAKE_CASE_: Optional[int] =16
else:
raise ValueError("""Model not supported""" )
SCREAMING_SNAKE_CASE_: Optional[int] ="""huggingface/label-files"""
if "speech-commands" in model_name:
SCREAMING_SNAKE_CASE_: Tuple =35
SCREAMING_SNAKE_CASE_: Tuple ="""speech-commands-v2-id2label.json"""
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =527
SCREAMING_SNAKE_CASE_: Optional[Any] ="""audioset-id2label.json"""
SCREAMING_SNAKE_CASE_: int =json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
    SCREAMING_SNAKE_CASE_: Dict ={int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Union[str, Any] =idalabel
SCREAMING_SNAKE_CASE_: Optional[int] ={v: k for k, v in idalabel.items()}
return config
def __magic_name__ ( lowercase ):
if "module.v" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[Any] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Any =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[int] =int(key_split[3] )
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[int] =val[:dim, :]
SCREAMING_SNAKE_CASE_: str =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: int =val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_: Dict =val[:dim]
SCREAMING_SNAKE_CASE_: List[str] =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =val
return orig_state_dict
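# --- Added illustration (not part of the original script) -------------------
# The qkv branch above slices one fused (3 * dim, dim) projection into thirds;
# the same idea in isolation (helper name is hypothetical):
def _split_fused_qkv(weight, dim):
    # Rows [0:dim] -> query, [dim:2*dim] -> key, the last dim rows -> value.
    return weight[:dim, :], weight[dim : dim * 2, :], weight[-dim:, :]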
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =[
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase=False ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_audio_spectrogram_transformer_config(lowercase )
SCREAMING_SNAKE_CASE_: int ={
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
SCREAMING_SNAKE_CASE_: List[Any] =model_name_to_url[model_name]
SCREAMING_SNAKE_CASE_: str =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )
# remove some keys
remove_keys(lowercase )
# rename some keys
SCREAMING_SNAKE_CASE_: Any =convert_state_dict(lowercase , lowercase )
# load 🤗 model
SCREAMING_SNAKE_CASE_: List[str] =ASTForAudioClassification(lowercase )
model.eval()
model.load_state_dict(lowercase )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
SCREAMING_SNAKE_CASE_: Optional[Any] =-4.2_677_393 if """speech-commands""" not in model_name else -6.845_978
SCREAMING_SNAKE_CASE_: Dict =4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526
SCREAMING_SNAKE_CASE_: Any =1024 if """speech-commands""" not in model_name else 128
SCREAMING_SNAKE_CASE_: Optional[Any] =ASTFeatureExtractor(mean=lowercase , std=lowercase , max_length=lowercase )
if "speech-commands" in model_name:
SCREAMING_SNAKE_CASE_: Dict =load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
SCREAMING_SNAKE_CASE_: Optional[int] =dataset[0]["""audio"""]["""array"""]
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =torchaudio.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =waveform.squeeze().numpy()
SCREAMING_SNAKE_CASE_: Tuple =feature_extractor(lowercase , sampling_rate=1_6000 , return_tensors="""pt""" )
# forward pass
SCREAMING_SNAKE_CASE_: Tuple =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
SCREAMING_SNAKE_CASE_: Any =torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
SCREAMING_SNAKE_CASE_: int =torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , lowercase , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
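# Example invocation (script filename and output path are placeholders):
#   python convert_audio_spectrogram_transformer.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub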
| 409
| 1
|
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def _snake_case ( ) -> None:
"""simple docstring"""
_lowerCAmelCase : List[Any] = input("Enter message: " )
_lowerCAmelCase : Optional[Any] = input("Enter key [alphanumeric]: " )
_lowerCAmelCase : Dict = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
_lowerCAmelCase : str = "encrypt"
_lowerCAmelCase : List[str] = encrypt_message(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif mode.lower().startswith("d" ):
_lowerCAmelCase : Any = "decrypt"
_lowerCAmelCase : List[str] = decrypt_message(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f'''\n{mode.title()}ed message:''' )
print(SCREAMING_SNAKE_CASE )
def _snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return translate_message(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "encrypt" )
def _snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return translate_message(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "decrypt" )
def _snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : List[str] = key.upper()
for symbol in message:
        num = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(SCREAMING_SNAKE_CASE )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(SCREAMING_SNAKE_CASE ):
_lowerCAmelCase : Tuple = 0
else:
translated.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 503
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class A__ ( A ):
"""simple docstring"""
_lowercase : List[str] = ''''''
_lowercase : Optional[Any] = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self : Dict , A_ : Optional[DatasetInfo] = None , A_ : Optional[str] = None , **A_ : int , ):
'''simple docstring'''
super().__init__(self , **A_ )
_lowerCAmelCase : Union[str, Any] = repo_info
_lowerCAmelCase : Optional[int] = token
_lowerCAmelCase : str = None
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
if self.dir_cache is None:
_lowerCAmelCase : int = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Optional[int] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(A_ ): {"name": str(A_ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __magic_name__ ( self : Dict , A_ : str , A_ : str = "rb" , **A_ : Optional[int] , ):
'''simple docstring'''
if not isinstance(self.repo_info , A_ ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
_lowerCAmelCase : str = hf_hub_url(self.repo_info.id , A_ , revision=self.repo_info.sha )
return fsspec.open(
A_ , mode=A_ , headers=get_authentication_headers_for_url(A_ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def __magic_name__ ( self : Union[str, Any] , A_ : List[Any] , **A_ : str ):
'''simple docstring'''
self._get_dirs()
_lowerCAmelCase : Optional[int] = self._strip_protocol(A_ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A_ )
def __magic_name__ ( self : Any , A_ : str , A_ : Any=False , **A_ : Dict ):
'''simple docstring'''
self._get_dirs()
_lowerCAmelCase : Dict = PurePosixPath(path.strip("/" ) )
_lowerCAmelCase : str = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : List[str] = PurePosixPath(p.strip("/" ) )
_lowerCAmelCase : Tuple = p.parent
if root == path:
_lowerCAmelCase : Union[str, Any] = f
_lowerCAmelCase : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
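# --- Added illustration (not part of the original module) -------------------
# Typical use of this legacy filesystem, written against the original,
# pre-obfuscation API (`repo_info` would come from HfApi; paths are
# placeholders):
#   fs = HfFileSystem(repo_info=repo_info, token=token)
#   print(fs.ls("", detail=False))          # paths cached from repo siblings
#   with fs.open("data/train.csv") as f:
#       head = f.read(1024)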
| 503
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__=None ) -> Optional[int]:
__lowerCamelCase : Tuple = argparse.ArgumentParser(add_help=lowerCamelCase__ , allow_abbrev=lowerCamelCase__ )
# The main config parser
__lowerCamelCase : List[Any] = config_command_parser(lowerCamelCase__ )
# The subparser to add commands to
__lowerCamelCase : List[Any] = config_parser.add_subparsers(title='subcommands' , dest='subcommand' )
# Then add other parsers with the parent parser
default_command_parser(lowerCamelCase__ , parents=[parent_parser] )
update_command_parser(lowerCamelCase__ , parents=[parent_parser] )
return config_parser
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
__lowerCamelCase : int = get_config_parser()
__lowerCamelCase : Any = config_parser.parse_args()
if not hasattr(lowerCamelCase__ , 'func' ):
config_parser.print_help()
exit(1 )
# Run
args.func(lowerCamelCase__ )
if __name__ == "__main__":
main()
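# --- Added illustration (not part of the original CLI) ----------------------
# The parent-parser/subparser wiring above, in miniature (names and the
# --config_file flag are hypothetical):
def _demo_config_cli():
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument("--config_file", default=None)
    root = argparse.ArgumentParser(prog="config")
    sub = root.add_subparsers(title="subcommands", dest="subcommand")
    sub.add_parser("default", parents=[parent])  # inherits --config_file
    sub.add_parser("update", parents=[parent])
    return root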
| 652
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline
_UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
_UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
_UpperCAmelCase : List[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : Tuple = False
@property
def lowerCAmelCase ( self : Tuple):
return 3_2
@property
def lowerCAmelCase ( self : List[Any]):
return 3_2
@property
def lowerCAmelCase ( self : str):
return self.time_input_dim
@property
def lowerCAmelCase ( self : List[str]):
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : List[str]):
return 1_0_0
@property
def lowerCAmelCase ( self : Dict):
torch.manual_seed(0)
__lowerCamelCase : Optional[Any] = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
return model
@property
def lowerCAmelCase ( self : Union[str, Any]):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Optional[Any]):
torch.manual_seed(0)
__lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Tuple = self.dummy_unet
__lowerCamelCase : List[Any] = self.dummy_movq
        __lowerCamelCase : str = DDIMScheduler(
            num_train_timesteps=1_0_0_0,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=SCREAMING_SNAKE_CASE__,
            set_alpha_to_one=SCREAMING_SNAKE_CASE__,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=SCREAMING_SNAKE_CASE__,
        )
__lowerCamelCase : Dict = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0):
__lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
SCREAMING_SNAKE_CASE__)
# create hint
__lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
__lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
else:
__lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Dict = 'cpu'
__lowerCamelCase : Tuple = self.get_dummy_components()
__lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : int = output.images
__lowerCamelCase : Tuple = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : List[str] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : int):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : int):
__lowerCamelCase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
__lowerCamelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png')
__lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0
__lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0)
__lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa)
pipe_prior.to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa)
__lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__)
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = 'A robot, 4k photo'
__lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0)
__lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior(
SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
__lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0)
__lowerCamelCase : Any = pipeline(
image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,)
__lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
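# --- Added illustration (not part of the original tests) --------------------
# The hint preparation above, factored out: scale an HxWx3 uint8 image to
# [0, 1] and reorder it to a 1x3xHxW float tensor (helper name hypothetical).
def _prepare_hint(pil_image):
    hint = torch.from_numpy(np.array(pil_image)).float() / 255.0
    return hint.permute(2, 0, 1).unsqueeze(0)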
| 652
| 1
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
SCREAMING_SNAKE_CASE = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _lowerCamelCase ( __A : Any , __A : Optional[int] , __A : List[Any] , __A : Union[str, Any] , __A : Optional[int] ) -> List[str]:
for attribute in key.split('''.''' ):
_UpperCAmelCase : int = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
_UpperCAmelCase : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
_UpperCAmelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCAmelCase : List[Any] = value
elif weight_type == "weight_g":
_UpperCAmelCase : List[str] = value
elif weight_type == "weight_v":
_UpperCAmelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCAmelCase : Dict = value
else:
_UpperCAmelCase : List[Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowerCamelCase ( __A : int , __A : List[str] ) -> Union[str, Any]:
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Union[str, Any] = fairseq_model.state_dict()
_UpperCAmelCase : Dict = hf_model.feature_extractor
_UpperCAmelCase : Tuple = hf_model.adapter
for name, value in fairseq_dict.items():
_UpperCAmelCase : int = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCAmelCase : List[Any] = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCAmelCase : Optional[int] = True
if "*" in mapped_key:
_UpperCAmelCase : int = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
_UpperCAmelCase : Union[str, Any] = mapped_key.replace('''*''' , _lowerCAmelCase )
if "weight_g" in name:
_UpperCAmelCase : List[str] = '''weight_g'''
elif "weight_v" in name:
_UpperCAmelCase : Optional[Any] = '''weight_v'''
elif "bias" in name:
_UpperCAmelCase : List[Any] = '''bias'''
elif "weight" in name:
_UpperCAmelCase : Dict = '''weight'''
else:
_UpperCAmelCase : List[str] = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
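# --- Added illustration (not part of the original script) -------------------
# The "*" handling above in isolation: wildcard MAPPING entries stand for a
# layer index recovered from the fairseq parameter name (helper hypothetical).
def _expand_wildcard(mapped_key, layer_index):
    return mapped_key.replace("*", layer_index)
# e.g. _expand_wildcard("encoder.layers.*.attention.k_proj", "3")
#      -> "encoder.layers.3.attention.k_proj"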
def _lowerCamelCase ( __A : Tuple , __A : Optional[Any] , __A : Optional[int] , __A : Dict , __A : str ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = full_name.split('''conv_layers.''' )[-1]
_UpperCAmelCase : str = name.split('''.''' )
_UpperCAmelCase : Optional[int] = int(items[0] )
_UpperCAmelCase : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCAmelCase : str = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCAmelCase : int = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCAmelCase : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCAmelCase )
def _lowerCamelCase ( __A : Optional[Any] , __A : Any , __A : Tuple , __A : str ) -> str:
_UpperCAmelCase : Optional[int] = full_name.split('''adaptor.''' )[-1]
_UpperCAmelCase : Any = name.split('''.''' )
if items[1].isdigit():
_UpperCAmelCase : List[Any] = int(items[1] )
else:
_UpperCAmelCase : Optional[int] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_UpperCAmelCase : List[str] = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_UpperCAmelCase : List[str] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_UpperCAmelCase : List[Any] = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_UpperCAmelCase : str = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_UpperCAmelCase : Any = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_UpperCAmelCase : Dict = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCAmelCase )
def _lowerCamelCase ( __A : Optional[int] ) -> Optional[Any]:
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = emb.weight.shape
_UpperCAmelCase : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
_UpperCAmelCase : Dict = emb.weight.data
return lin_layer
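# --- Added illustration (not part of the original script) -------------------
# What the helper above achieves: an output projection that shares (rather
# than copies) the embedding matrix, so logits = hidden @ embedding_weight.T.
# A minimal standalone sketch of the same tying idea:
def _demo_weight_tying():
    emb = nn.Embedding(10, 4)
    lin = nn.Linear(4, 10, bias=False)
    lin.weight = emb.weight  # shared Parameter: updating one updates both
    assert lin.weight.data_ptr() == emb.weight.data_ptr()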
@torch.no_grad()
def _lowerCamelCase ( __A : Optional[Any] , __A : Any , __A : Union[str, Any] , __A : Optional[Any] , __A : int , __A : List[str] , __A : Optional[int] , __A : Tuple , __A : Optional[Any] , __A : List[Any] , __A : List[str] , ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(
_lowerCAmelCase , add_adapter=_lowerCAmelCase , adapter_stride=_lowerCAmelCase , adapter_kernel_size=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , output_hidden_size=_lowerCAmelCase , )
_UpperCAmelCase : List[Any] = MBartConfig.from_pretrained(_lowerCAmelCase )
# load model
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
_UpperCAmelCase : Optional[int] = model[0].eval()
# load feature extractor
_UpperCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase , use_auth_token=_lowerCAmelCase )
# set weights for wav2vec2 encoder
_UpperCAmelCase : Optional[Any] = WavaVecaModel(_lowerCAmelCase )
recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
# load decoder weights
_UpperCAmelCase : List[str] = MBartForCausalLM(_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_UpperCAmelCase : Optional[Any] = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Union[str, Any] = MBartaaTokenizer(_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = hf_wavavec.config.to_dict()
_UpperCAmelCase : List[str] = tokenizer.pad_token_id
_UpperCAmelCase : List[Any] = tokenizer.bos_token_id
_UpperCAmelCase : Optional[int] = tokenizer.eos_token_id
_UpperCAmelCase : str = '''mbart50'''
_UpperCAmelCase : Optional[Any] = '''wav2vec2'''
_UpperCAmelCase : int = tokenizer.eos_token_id
_UpperCAmelCase : Any = 250_004
_UpperCAmelCase : Optional[int] = tokenizer.eos_token_id
_UpperCAmelCase : int = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250004, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
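# Example invocation (script filename and all paths are placeholders):
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./converted-model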
| 701
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( __A : int , __A : Optional[Any] , __A : int ) -> int:
# Initialise PyTorch model
_UpperCAmelCase : Dict = RemBertConfig.from_json_file(__A )
print('''Building PyTorch model from configuration: {}'''.format(str(__A ) ) )
_UpperCAmelCase : int = RemBertModel(__A )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(__A , __A , __A )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(__A ) )
torch.save(model.state_dict() , __A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
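# Example invocation (paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert-pytorch.bin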
| 186
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class _lowercase:
"""simple docstring"""
def __init__( self: Dict ):
__UpperCAmelCase = []
__UpperCAmelCase = 0
__UpperCAmelCase = 0
def snake_case ( self: Tuple ):
return self.head == self.tail
def snake_case ( self: Dict ,a: Any ):
self.data.append(a )
__UpperCAmelCase = self.tail + 1
def snake_case ( self: int ):
__UpperCAmelCase = self.data[self.head]
__UpperCAmelCase = self.head + 1
return ret
def snake_case ( self: Optional[Any] ):
return self.tail - self.head
def snake_case ( self: Any ):
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class _lowercase:
"""simple docstring"""
def __init__( self: Any ,a: Any ):
__UpperCAmelCase = data
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = 1
def snake_case ( self: int ):
return self.data
def snake_case ( self: Optional[int] ):
return self.left
def snake_case ( self: int ):
return self.right
def snake_case ( self: str ):
return self.height
def snake_case ( self: List[str] ,a: Any ):
__UpperCAmelCase = data
def snake_case ( self: Union[str, Any] ,a: MyNode | None ):
__UpperCAmelCase = node
def snake_case ( self: str ,a: MyNode | None ):
__UpperCAmelCase = node
def snake_case ( self: List[str] ,a: int ):
__UpperCAmelCase = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at ``node`` to the right (its left child becomes the root)."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at ``node`` to the left (its right child becomes the root)."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
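# For orientation, a right rotation of the subtree rooted at A (left-heavy
# because of UB) reshapes it like this -- a sketch following the usual AVL
# diagrams rather than anything specific to this file:
#
#         A                B
#        / \              / \
#       B   C    -->    Bl   A
#      / \              /   / \
#     Bl  Br           UB  Br  C
#    /
#   UB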
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Replace the deleted value with the smallest value of the right
            # subtree, then delete that value from the right subtree.
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # Rebalance after the deletion.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    """An AVL tree that rebalances itself on insertion and deletion."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
| 396
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
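    # Note: the pipeline's entailment_id property scans config.label2id for a label
    # whose lowercased name starts with "entail" and falls back to -1 otherwise,
    # which is exactly the behaviour the mappings above exercise.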
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 ,candidate_labels=['politics', 'public health', 'science'] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} ,)
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} ,)
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} ,)
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True)
self.assertEqual(
            nested_simplify(outputs), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} ,)
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} ,)
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True)
self.assertEqual(
            nested_simplify(outputs), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} ,)
| 396
| 1
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
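# A minimal sketch of the API exercised above, assuming only the helpers
# imported at the top of this file; the toy module and paths are illustrative:
#
#   model = ModelForTest()
#   with TemporaryDirectory() as tmp_dir:
#       offload_state_dict(tmp_dir, model.state_dict())   # weights -> .dat files + index.json
#       loader = OffloadedWeightsLoader(state_dict={}, save_folder=tmp_dir)
#       weight = loader["linear1.weight"]                 # lazily reloaded from disk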
| 171
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
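# A quick sketch of how this configuration might be instantiated; the overrides
# below are illustrative values, not recommended settings:
#
#   config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
#   assert config.model_type == "van"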
| 171
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 88
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
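# A hedged example invocation (the script filename and dump folder below are
# placeholders, not paths guaranteed by this file):
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub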
| 626
| 0
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v["duration"]}""")
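# Example shell usage, assuming this file is saved as get_github_job_time.py;
# the run id is a placeholder:
#
#   python get_github_job_time.py --workflow_run_id 123456789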
| 713
|
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
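# Sanity check (an illustrative example, not part of the original script):
# 4150 = 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0, so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is counted by solution().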
| 475
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 171
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
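# Hypothetical invocation (all three paths below are placeholders):
#
#   python convert_ldm_original.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_path ./ldm/config.yaml \
#       --output_path ./ldm-pipeline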
| 406
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
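# A minimal usage sketch, assuming the `vision` backend is installed and that a
# local file "photo.jpg" exists (both assumptions, not part of this module):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   print(tool(Image.open("photo.jpg")))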
| 706
|
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
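# Illustrative values (note this returns the best *subsequence*, which need not
# be contiguous): max_subsequence_sum([1, 2, 3, 4, -2]) == 10 (sum of the
# positive elements) and max_subsequence_sum([-10, -2, -5]) == -2 (the largest
# single element when everything is negative).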
| 445
| 0
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 104
|
def solution(pence: int = 200) -> int:
    """Count the ways ``pence`` can be made from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
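# Illustrative check on a smaller target (not part of the original): with the
# same coin set, solution(5) == 4, counting 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.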
| 484
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq wav2vec2-conformer checkpoint into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
UpperCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
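
# Example invocation (script name and all paths below are placeholders for illustration):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec2_conformer.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf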
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared test fixtures: the small Lena image and its grayscale version.
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
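
# These are plain assert-style tests meant to be collected by pytest, e.g.
# (path assumed to match the repository layout implied by the imports above):
#   python -m pytest digital_image_processing/test_digital_image_processing.py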
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`.

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(21)
    3
    >>> minimum_squares_to_represent_a_number(-1)
    Traceback (most recent call last):
        ...
    ValueError: the value of input must not be a negative number
    """
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    # answers[i] holds the minimum number of squares summing to i.
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
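
# The dynamic program above runs in O(n * sqrt(n)) time and O(n) space:
# each i <= n tries every square candidate j**2 with j <= sqrt(i).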
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor wrapping a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
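
# Minimal usage sketch (the checkpoint id below is an assumption; any FLAVA
# checkpoint with this processor class should work):
#   from transformers import FlavaProcessor
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)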
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters arranged
    differently (ignoring case and spaces).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
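
# A single counting pass makes this O(n) time and O(k) space, where k is the
# number of distinct characters.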
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase : List[Any] = input('Enter the first string ').strip()
UpperCamelCase : List[str] = input('Enter the second string ').strip()
UpperCamelCase : Union[str, Any] = check_anagrams(input_a, input_b)
print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
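
# Standalone sketch mirroring the inference tests above (same model id and flags):
#   from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#   args = TensorFlowBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()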