| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):  # class name reconstructed; the dump obfuscated identifiers
    def __init__(self, unet, scheduler) -> None:
        """Unconditional audio generation pipeline built from a UNet and a scheduler."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate `batch_size` audio samples of roughly `audio_length_in_s` seconds."""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
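# A minimal usage sketch (hedged: the pipeline class name above is reconstructed,
# and the checkpoint id is illustrative of a dance-diffusion-style unconditional model):
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
#   waveform = output.audios[0]  # numpy array, shape (channels, sample_count)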
| 464
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):  # class name reconstructed from the vocab files above
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            self.fairseq_tokens_to_ids[f"[unused{i}]"] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of pieces to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
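# Special-token layout sketch (ids illustrative, taken from the fairseq alignment
# above where "[SEP]" maps to id 2):
#   single sequence: tokens + [SEP]        -> build_inputs_with_special_tokens([10, 11]) == [10, 11, 2]
#   pair:            A + [SEP] + B + [SEP] -> build_inputs_with_special_tokens([10], [20]) == [10, 2, 20, 2]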
| 464
| 1
|
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring common to `text1` and `text2` via dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
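# Worked example (illustrative, not from the original file): the DP table stores
# common-suffix lengths, so the longest run wins.
#   >>> longest_common_substring("abcdxyz", "xyzabcd")
#   'abcd'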
| 717
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a gzip file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an xz file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a zstd file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
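# A minimal usage sketch (assumes a local "data.txt.gz" exists; class names above
# are reconstructed in the style of `datasets`' compression filesystems):
#   fs = GzipFileSystem(fo="data.txt.gz")
#   print(fs.cat("data.txt"))  # decompressed bytes of the single inner file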
| 318
| 0
|
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Evaluate the polynomial through (x_points, y_points) at x0 using Neville's
    iterated-interpolation scheme (function name reconstructed from the algorithm)."""
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
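# Worked example (illustrative): the points below lie on the line y = x + 5,
# so evaluating the interpolating polynomial at x0 = 5 gives 10.
#   >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
#   10.0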
| 363
|
"""simple docstring"""
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self):
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for str() on a vector"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for len() on a vector"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for * operator (scalar and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
| 363
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
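# Sketch of what the lazy pattern buys (illustrative): importing the package only
# builds _import_structure; an access such as `from <pkg> import BertModel` then
# goes through _LazyModule.__getattr__, which imports .modeling_bert on first use.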
| 711
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
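# A minimal usage sketch (values illustrative): a config that stretches RoPE
# positions 2x via linear scaling passes the validation above.
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})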
| 501
| 0
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit via a prime sieve
    (function name follows the Project Euler solution convention)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # phi(n) = n * prod(1 - 1/p) over the distinct prime factors p of n
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f"{solution() = }")
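# Sanity-check sketch (small limit, illustrative): phi(2..8) = 1, 2, 2, 4, 2, 6, 4,
# which sums to 21 -- the classic count of reduced proper fractions with d <= 8 --
# so solution(8) should return 21 (up to float truncation inside int(sum(...))).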
| 29
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
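# Example invocations (sketch; subcommand names follow the registrations above):
#   transformers-cli env
#   transformers-cli download bert-base-uncased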
| 366
| 0
|
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` via breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path from `start` to `target`, or -1 if unreachable."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
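# Trace sketch for the demo calls above: BFS explores level by level, so the
# first time "D" is reached the path ["G", "C", "A", "B", "D"] is already the
# shortest one; with unit edge weights its length is 4 edges.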
| 664
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 664
| 1
|
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """Thin wrapper around a DeepSpeed config dict with dotted-path accessors
    (class and method names reconstructed; the dump obfuscated identifiers)."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
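# A minimal usage sketch of the dotted-path accessors above (config passed inline):
#   ds = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   ds.get_value("zero_optimization.stage")  # -> 3
#   ds.is_zero3()                            # -> True
#   ds.is_offload()                          # -> True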
| 169
|
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 526
| 0
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first wordpiece split of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):  # class name reconstructed from the openbmb/cpm-ant vocab above
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 476
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):  # class name reconstructed; a dummy placeholder for missing backends
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 476
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Map the original GLPN checkpoint keys onto the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
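# Example invocation sketch (paths and script name illustrative):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti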
| 561
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
lowerCAmelCase :Dict = logging.get_logger(__name__)
lowerCAmelCase :int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase :List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
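# Hedged usage sketch (not part of the original module): it assumes a local
# SQuAD-style data directory, so it is left commented.
#
#   from transformers import AutoTokenizer
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad", threads=4)
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_set = SquadDataset(args, tokenizer, mode=Split.train)
#   print(len(train_set), train_set[0]["input_ids"].shape)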
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
"""simple docstring"""
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
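# Hedged sketch (not in the original file) of how a concrete test case might
# plug into the mixin above; the block class and the expected floats are
# placeholders to be captured from a reference run, so it stays commented.
#
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"
#
#       def test_output(self):
#           expected_slice = [...]  # nine reference values, hypothetical here
#           super().test_output(expected_slice)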
"""simple docstring"""
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Return True iff the given string is a valid dotted-quad IPv4 address:
    four dot-separated groups of digits, each in the range 0-255.
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
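# A few worked examples for the validator above (added as a sketch; the
# expected values follow from the 0-255 octet rule):
assert is_ip_v4_address_valid("192.168.0.23") is True
assert is_ip_v4_address_valid("128.0.0.255") is True
assert is_ip_v4_address_valid("192.168.256.1") is False  # 256 is out of range
assert is_ip_v4_address_valid("19.117.63") is False  # only three octets
assert is_ip_v4_address_valid("1.2.3.c") is False  # non-numeric octet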
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ErnieMModel`].
    """

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
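# Hedged usage sketch (not in the original file): the configuration above can
# be instantiated with its defaults and individual fields overridden, e.g.
#
#   config = ErnieMConfig(num_hidden_layers=6)
#   assert (config.hidden_size, config.num_hidden_layers) == (768, 6)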
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def SCREAMING_SNAKE_CASE__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase_ : List[str] =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase_ : Dict ={'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase_ : Dict =data_args.train_file.split('''.''' )[-1]
lowerCAmelCase_ : int =data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase_ : Dict =data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCAmelCase_ : Tuple =load_dataset('''csv''' , data_files=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase_ : List[Any] =load_dataset('''json''' , data_files=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase_ : Tuple =raw_datasets['''train'''].features['''label'''].names
lowerCAmelCase_ : List[str] =len(_SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ : List[Any] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase_ : int =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase_ : str =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase_ : Union[str, Any] ='''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase_ : int =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase_ : Tuple ={'''Refused''': 0, '''Entailed''': 1}
lowerCAmelCase_ : int ={0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
lowerCAmelCase_ : List[str] =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_SCREAMING_SNAKE_CASE ):
# Tokenize the texts
def _convert_table_text_to_pandas(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : Any =[_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCAmelCase_ : Any =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowerCAmelCase_ : int =examples['''statement''']
lowerCAmelCase_ : Optional[Any] =list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowerCAmelCase_ : Optional[Any] =tokenizer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : List[Any] =examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCAmelCase_ : Any =raw_datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCAmelCase_ : Union[str, Any] =raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCAmelCase_ : int =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCAmelCase_ : int =raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCAmelCase_ : Any =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCAmelCase_ : Any =raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCAmelCase_ : Optional[int] =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_SCREAMING_SNAKE_CASE ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
lowerCAmelCase_ : str =Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowerCAmelCase_ : List[str] =None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase_ : Dict =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase_ : str =last_checkpoint
lowerCAmelCase_ : List[str] =trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Tuple =train_result.metrics
lowerCAmelCase_ : Union[str, Any] =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(_SCREAMING_SNAKE_CASE )
)
lowerCAmelCase_ : Any =min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('''train''' , _SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase_ : Any =trainer.evaluate(eval_dataset=_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Optional[int] =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Tuple =min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCAmelCase_ : Any =predict_dataset.remove_columns('''label''' )
lowerCAmelCase_ : Optional[Any] =trainer.predict(_SCREAMING_SNAKE_CASE , metric_key_prefix='''predict''' ).predictions
lowerCAmelCase_ : Tuple =np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
lowerCAmelCase_ : Dict =os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : Dict =label_list[item]
writer.write(f'{index}\t{item}\n' )
lowerCAmelCase_ : List[str] ={'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
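# Hedged usage sketch (not part of the original script): a hypothetical
# invocation; the script filename, checkpoint and hyper-parameters are
# illustrative only.
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact \
#       --do_train --do_eval \
#       --max_seq_length 1024 \
#       --output_dir ./tapex-tabfact-output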
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_config_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split ``x`` into sentences, one sentence per line (used for rougeLsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: str = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
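# Hedged usage sketch (the checkpoint path is hypothetical): assuming trained
# weights saved under ./ddim-model, the pipeline above could be driven like
# this; it needs real weights, so it stays commented.
#
#   from diffusers import UNet2DModel, DDIMScheduler
#
#   unet = UNet2DModel.from_pretrained("./ddim-model", subfolder="unet")
#   scheduler = DDIMScheduler.from_pretrained("./ddim-model", subfolder="scheduler")
#   pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
#   images = pipe(batch_size=2, num_inference_steps=50).images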
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_outputs, decimals=4),
            [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_outputs, decimals=4),
            [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
] , )
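# Hedged usage sketch of the pipeline call pattern the tests above exercise;
# it needs network access and a CUDA device, so it is left commented:
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
#   outputs = generator(
#       "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
#   )
#   print(len(outputs["masks"]), outputs["scores"][:3])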
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested')
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested')
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule')
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports)
def pytest_sessionfinish(session , exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    def check_output( self , want , got , optionflags ) -> List[Any]:
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 444
| 0
|
"""simple docstring"""
def odd_even_sort( input_list: list ):
    """Sort a list in place using the odd-even (brick sort) algorithm."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
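# Non-interactive usage sketch (illustrative, not part of the original module):
# odd_even_sort([5, 3, 8, 1]) returns [1, 3, 5, 8]; the list is also sorted in
# place, since adjacent out-of-order pairs are swapped at even then odd indices.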
| 580
|
"""simple docstring"""
import math
def real_power( apparent_power: float , power_factor: float ):
    """Calculate real power (W) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * power_factor
def reactive_power( apparent_power: float , power_factor: float ):
    """Calculate reactive power (VAR) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
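# Worked example (illustrative, not part of the original module): a 100 VA load
# at power factor 0.8 draws real_power(100, 0.8) == 80.0 W and
# reactive_power(100, 0.8) == 60.0 VAR, since sqrt(1 - 0.8**2) == 0.6.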
| 580
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
    def get_scheduler_config( self , **kwargs ) -> Dict:
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self , **kwargs ) -> Optional[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
    def __lowerCAmelCase ( self ) -> List[Any]:
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps, torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
    def __lowerCAmelCase ( self ) -> List[str]:
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end )
    def __lowerCAmelCase ( self ) -> List[Any]:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def __lowerCAmelCase ( self ) -> List[Any]:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def __lowerCAmelCase ( self ) -> List[Any]:
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def __lowerCAmelCase ( self ) -> List[str]:
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def __lowerCAmelCase ( self ) -> Dict:
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def __lowerCAmelCase ( self ) -> Dict:
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 5_00] ):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps )
    def __lowerCAmelCase ( self ) -> Any:
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t, eta=eta )
    def __lowerCAmelCase ( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_20, 4_00 ) - 0.1_4771 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_80, 9_60 ) - 0.3_2460 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87, 4_86 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99, 9_98 ) - 0.02 ) ) < 1E-5
    def __lowerCAmelCase ( self ) -> List[str]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1
        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c], dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1, per_sample_batch )
        residual = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ), eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1E-2
        assert abs(result_mean.item() - 0.4982 ) < 1E-3
    def __lowerCAmelCase ( self ) -> int:
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0067 ) < 1E-2
        assert abs(result_mean.item() - 0.22_3967 ) < 1E-3
    def __lowerCAmelCase ( self ) -> Optional[int]:
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5302 ) < 1E-2
        assert abs(result_mean.item() - 0.0684 ) < 1E-3
    def __lowerCAmelCase ( self ) -> List[Any]:
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8295 ) < 1E-2
        assert abs(result_mean.item() - 0.1951 ) < 1E-3
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.0784 ) < 1E-2
        assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 214
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 214
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase__ ( PretrainedConfig ):
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }
    def __init__( self , vocab_size=5_0_2_6_5 , d_model=1_0_2_4 , decoder_layers=1_2 , decoder_attention_heads=1_6 , decoder_ffn_dim=4_0_9_6 , activation_function="gelu" , max_position_embeddings=5_1_2 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
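# Minimal usage sketch (illustrative, not part of the original module):
# config = UpperCAmelCase__(d_model=512, decoder_layers=6)   # the config class above
# assert config.d_model == 512 and config.decoder_layers == 6
# assert config.hidden_size == 512   # the generic name resolves through attribute_map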
| 44
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCamelCase ( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = """Speech2TextFeatureExtractor"""
    tokenizer_class = """Speech2TextTokenizer"""
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
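# Minimal usage sketch (illustrative; the checkpoint name and the 16 kHz
# `waveform` array are assumptions, not part of this module):
#
# from transformers import Speech2TextProcessor
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
# inputs["input_features"]  # log-mel features from the feature extractor
# inputs["labels"]          # token ids from the tokenizer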
| 636
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class __lowerCAmelCase( PretrainedConfig ):
    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=40_478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 233
|
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa:
    def __init__( self , data: bytes ):
        """simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
    @staticmethod
    def preprocessing( data: bytes ):
        """simple docstring"""
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        """simple docstring"""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x1_00_00_00_00
                # Compression
                sa = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                sb = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value: int , rotations: int ):
        """simple docstring"""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaTest( unittest.TestCase ):
    def test_match_hashes( self ) -> Union[str, Any]:
        """simple docstring"""
        import hashlib
        msg = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main( ):
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
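# Minimal usage sketch (illustrative): the hex digest should agree with
# hashlib's SHA-256 for any byte string.
# >>> import hashlib
# >>> SHAaaa(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
# True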
| 233
| 1
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ) -> Optional[Any]:
    """simple docstring"""
    parser = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=lowerCAmelCase , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=lowerCAmelCase , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=lowerCAmelCase )
return parser.parse_args()
def main( ) -> Union[str, Any]:
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 373
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['''data'''])
y = np.array(data['''target'''])
classes = data['''target_names''']
X_train , X_test , y_train , y_test = train_test_split(X, y)
def euclidean_distance( input_a , input_b ) -> Optional[Any]:
    """simple docstring"""
    return np.linalg.norm(np.array(input_a ) - np.array(input_b ) )
def classifier( train_data , train_target , classes , point , k=5 ) -> str:
    """simple docstring"""
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 373
| 1
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main( ):
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 706
|
def solution( n: int = 1_0_0 ):
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"{solution() = }")
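# Worked example (illustrative): for n = 10 the sum of squares is 385 and the
# squared sum is 55**2 = 3025, so solution(10) == 3025 - 385 == 2640.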
| 407
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ) -> Optional[int]:
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=True , )
        assert hasattr(self , "env" )
    def create_estimator( self , instance_count ) -> int:
        job_name = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="py36" , )
    def save_results_as_csv( self , job_name ) -> Union[str, Any]:
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(2,)] )
    def test_script( self , instance_count ) -> Optional[Any]:
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile )
| 104
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _lowerCAmelCase ( BertTokenizationTest ):
    '''simple docstring'''
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
def lowercase (self ) -> Union[str, Any]:
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 585
| 0
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def setUp( self ) -> Any:
        """simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
    def test_tpu( self ) -> int:
        """simple docstring"""
        distributed_args = F'''
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 718
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq: Iterable[str] , size: int ) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input( dirty: str ) -> str:
    dirty = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ''''''
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table( key: str ) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode( plaintext: str , key: str ) -> str:
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ''''''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode( ciphertext: str , key: str ) -> str:
    table = generate_table(key )
    plaintext = ''''''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
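# Minimal usage sketch (illustrative, not part of the original module), using
# the classic Wikipedia Playfair key and message; decode round-trips up to the
# X padding inserted by prepare_input:
if __name__ == "__main__":
    ciphertext = encode('''Hide the gold in the tree stump''' , '''playfair example''' )
    print(ciphertext )  # e.g. BMODZBXDNABEKUDMUIXMMOUVIF for this key
    print(decode(ciphertext , '''playfair example''' ) )  # HIDETHEGOLDINTHETREXESTUMP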
| 574
| 0
|
import qiskit
def quantum_entanglement( qubits: int = 2 ) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F"Total count for various states are: {quantum_entanglement(3)}")
| 108
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 249
| 0
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
a : Optional[int] = NewType('''DataClass''', Any)
a : Dict = NewType('''DataClassType''', Any)
def string_to_bool ( v ):
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def make_choice_type_function ( choices ):
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg ( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['''aliases'''] = aliases
    if help is not None:
        metadata['''help'''] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser ( ArgumentParser ):
    '''simple docstring'''
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        if "formatter_class" not in kwargs:
            kwargs['''formatter_class'''] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field( parser , field ) -> Any:
        '''simple docstring'''
        field_name = F'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    F' Problem encountered in field \'{field.name}\'.' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(F'--no_{field.name}' , action='''store_false''' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ) -> Union[str, Any]:
        '''simple docstring'''
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}' )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
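# Minimal usage sketch (illustrative, not part of the original module):
#
# @dataclasses.dataclass
# class TrainArgs:
#     learning_rate: float = 3e-4
#     do_train: bool = False
#
# parser = HfArgumentParser([TrainArgs])
# (train_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-3", "--do_train"])
# assert train_args.learning_rate == 1e-3 and train_args.do_train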
| 527
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
a : Optional[int] = datasets.logging.get_logger(__name__)
a : Tuple = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
a : Union[str, Any] = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
a : Union[str, Any] = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
a : Tuple = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare( self , dl_manager ) -> List[str]:
        '''simple docstring'''
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = '''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ) -> Tuple:
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 527
| 1
|
'''simple docstring'''
def gray_code (bit_count : int )-> list:
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string (bit_count : int )-> list:
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
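# Minimal usage sketch (illustrative): consecutive values differ by one bit.
# >>> gray_code(2)
# [0, 1, 3, 2]
# >>> gray_code(3)
# [0, 1, 3, 2, 6, 7, 5, 4]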
| 24
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Dict = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ) -> Dict:
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [5_12, 10_24, 20_48, 40_96]
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
    def prepare_image_processor_dict( self ) -> Optional[int]:
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ) -> Dict:
        url = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
def __A ( self : Any ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : int ) -> Union[str, Any]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Any ) -> int:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def __A ( self : Optional[int] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 298
| 0
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
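# Added sanity note: after the argument reduction above, both series converge quickly;
# e.g. maclaurin_sin(10) agrees with math.sin(10) ≈ -0.5440 to many decimal places.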
| 23
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
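
# Added usage sketch (assumes network access to the Hugging Face Hub; not part of this module):
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tok("Hello world")  # -> {"input_ids": [...], "attention_mask": [...]}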
| 23
| 1
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 563
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = """scheduler_config.json"""


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        '''simple docstring'''
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder,
            return_unused_kwargs=True, **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        '''simple docstring'''
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        '''simple docstring'''
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        '''simple docstring'''
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """simple docstring"""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999, dtype=jnp.float32):
    """simple docstring"""
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
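
# Added note: this is the Glide / "squaredcos_cap_v2" cosine schedule. Each beta_t comes
# from the ratio of consecutive alpha_bar values, so the cumulative product of (1 - beta)
# tracks cos^2; max_beta caps the final steps for numerical stability.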
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        '''simple docstring'''
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """simple docstring"""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
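
# Added note: the helpers above implement the standard forward-diffusion identities
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps   (add_noise_common)
#   v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0   (get_velocity_common)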
| 563
| 1
|
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 521
|
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 521
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, """r""", encoding="""utf-8""") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index: int):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE, """ """).strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""], )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, """w""", encoding="""utf-8""") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(token)} \n''')

        return out_vocab_file, out_monolingual_vocab_file
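
# Added usage sketch (the file paths below are placeholders, not shipped with this module):
#   tok = BartphoTokenizer("sentencepiece.bpe.model", "dict.txt")
#   tok.tokenize("some Vietnamese text")  # SentencePiece pieces mapped through the reduced vocab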
| 214
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = ShapEImg2ImgPipeline
    params = ['''image''']
    batch_params = ['''image''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
@property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 16,
            """embedding_dim""": self.time_input_dim,
            """num_embeddings""": 32,
            """embedding_proj_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """num_layers""": 1,
            """clip_embed_dim""": self.time_input_dim * 2,
            """additional_embeddings""": 0,
            """time_embed_act_fn""": """gelu""",
            """norm_in_type""": """layer""",
            """embedding_proj_norm_type""": """layer""",
            """encoder_hid_proj_type""": None,
            """added_emb_type""": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            """param_shapes""": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            """d_latent""": self.time_input_dim,
            """d_hidden""": self.renderer_dim,
            """n_output""": 12,
            """background""": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="""exp""", num_train_timesteps=1024, prediction_type="""sample""", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """image_processor""": image_processor,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """image""": input_image,
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 32,
            """output_type""": """np""",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = """cpu"""

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImg2ImgPipeline.from_pretrained("""openai/shap-e-img2img""")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="""np""", ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 214
| 1
|
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """tapas"""

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02,
                 layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0,
                 aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0,
                 use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0,
                 aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False,
                 average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None,
                 max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True,
                 allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False,
                 reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None,
                 no_aggregation_label_index=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        a = vocab_size
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 662
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    '''simple docstring'''
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
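
# Added sanity check: the tower 2^(2^2) = 16, so its last 8 digits are simply 16.
assert solution(base=2, height=3, digits=8) == 16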
if __name__ == "__main__":
print(F'''{solution() = }''')
| 662
| 1
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    '''simple docstring'''
    return compare_versions(torch_version, operation, version)
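
# Added usage sketch (assumes torch is installed, as the import above already requires):
if __name__ == "__main__":
    print(is_torch_version(">=", "1.0.0"))  # True on any modern torch install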
| 167
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError('''int() can\'t convert non-string with explicit base''')
    if num < 0:
        raise ValueError('''parameter must be positive int''')
    if isinstance(base, str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''')
    if isinstance(base, float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''')
    if base in (0, 1):
        raise ValueError('''base must be >= 2''')
    if base > 36:
        raise ValueError('''base must be <= 36''')
    new_value = ''''''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 167
| 1
|
'''simple docstring'''
def catalan(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
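# Added sanity note: with this 1-based convention, catalan(1..5) == 1, 1, 2, 5, 14.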
| 13
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    '''simple docstring'''
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""", class_=class_).find("""span""").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 13
| 1
|
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f'''Vertex\tShortest Distance from vertex {src}''')
    for i, d in enumerate(distance):
        print(f'''{i}\t\t{d}''')


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float('inf')] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('Negative cycle found')

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}

    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
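    # Added example session: edges 0->1 (w=5) and 1->2 (w=3) with source 0 print the
    # distances [0.0, 5.0, 8.0] for vertices 0, 1, 2.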
| 618
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:-1])
        full_error_msg = ''
        depreciated_args = eval(str(e).split(' ')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 605
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        """simple docstring"""
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def lowerCamelCase__ ( self : str ) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = "UNwant\u00E9d,running"
A_ = tokenizer.tokenize(_snake_case )
A_ = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
A_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
A_ = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
A_ = self.get_rust_tokenizer()
A_ = tokenizer.encode(_snake_case )
A_ = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# With lower casing
A_ = self.get_tokenizer(do_lower_case=_snake_case )
A_ = self.get_rust_tokenizer(do_lower_case=_snake_case )
A_ = "UNwant\u00E9d,running"
A_ = tokenizer.tokenize(_snake_case )
A_ = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
A_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
A_ = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
A_ = self.get_rust_tokenizer()
A_ = tokenizer.encode(_snake_case )
A_ = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
A_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def lowerCamelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
A_ = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCamelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
A_ = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
A_ = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A_ = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
A_ = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A_ = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
A_ = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase__ ( self : Any ) -> Any:
"""simple docstring"""
A_ = BasicTokenizer(do_lower_case=_snake_case , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
A_ = BasicTokenizer()
A_ = "a\n'll !!to?'d of, can't."
A_ = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(_snake_case ) , _snake_case )
def lowerCamelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
A_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
A_ = {}
for i, token in enumerate(_snake_case ):
A_ = i
A_ = WordpieceTokenizer(vocab=_snake_case , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowerCamelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_snake_case ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_snake_case ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def lowerCamelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
A_ = self.tokenizer_class.from_pretrained("bert-base-uncased" )
A_ = tokenizer.encode("sequence builders" , add_special_tokens=_snake_case )
A_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_snake_case )
A_ = tokenizer.build_inputs_with_special_tokens(_snake_case )
A_ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCamelCase__ ( self : str ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 482
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running Markov chain simulations."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
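

# Added usage sketch (not in the original file): the node names, transition
# probabilities, and step count below are illustrative assumptions only.
if __name__ == "__main__":
    example_transitions = [
        ("a", "a", 0.9),
        ("a", "b", 0.1),
        ("b", "a", 0.5),
        ("b", "b", 0.5),
    ]
    # Random-walk for 1000 steps starting at "a"; the Counter records visits.
    print(get_transitions("a", example_transitions, 1000))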
| 482
| 1
|
"""simple docstring"""
import math
class Graph:
    def __init__(self, n: int = 0) -> None:  # a graph with Node 0, 1, ..., n-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u: int, v: int, w: int) -> None:
        self.dp[u][v] = w

    def floyd_warshall(self) -> None:
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u: int, v: int) -> int:
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
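
    # Note (added, not in the original): dp is initialised to math.inf
    # everywhere, including dp[i][i], so a node's distance to itself is only
    # finite when it lies on a cycle. If zero self-distances are wanted, zero
    # the diagonal before relaxing:
    #
    #     for i in range(graph.n):
    #         graph.dp[i][i] = 0
    #     graph.floyd_warshall()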
| 535
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_459_8  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed sqrt(3RT/M), with T in K and M in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 535
| 1
|
"""simple docstring"""
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
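

# Added usage sketch (not in the original file): all three variants should
# agree; "Waltz, bad nymph, for quick jigs vex" is another well-known pangram.
if __name__ == "__main__":
    for checker in (is_pangram, is_pangram_faster, is_pangram_fastest):
        assert checker("Waltz, bad nymph, for quick jigs vex")
        assert not checker("hello world")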
| 707
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
    def test_kandinsky(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 359
| 0
|
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total
if __name__ == "__main__":
print(F'{solution() = }')
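

# An alternative sketch (added, not from the original file): every third
# Fibonacci number is even, and the even terms satisfy E(k) = 4 * E(k-1) + E(k-2),
# so they can be generated directly instead of filtering the full sequence.
def solution_even_recurrence(n: int = 4_000_000) -> int:
    previous, current = 0, 2  # 0 and 2 are the first two even Fibonacci numbers
    total = 0
    while current <= n:
        total += current
        previous, current = current, 4 * current + previous
    return total


if __name__ == "__main__":
    assert solution_even_recurrence() == solution()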
| 152
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 152
| 1
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
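

# Added usage sketch (not part of the original file): the classic sequence
# below has a longest strictly increasing subsequence of length 6, e.g.
# [2, 3, 7, 8, 10, 13].
if __name__ == "__main__":
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6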
| 704
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 0
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE_ = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 237
|
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
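

# Added usage sketch (not part of the original file): a quick exercise of the
# list interface reconstructed above.
def demo() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)  # append at the tail
    assert str(linked_list) == "1 2 3"
    assert 2 in linked_list
    linked_list.delete_value(2)
    assert list(linked_list) == [1, 3]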
| 563
| 0
|
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    """Class to store the configuration of the BertAbs model."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 374
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as doc links."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check that the model list in `task_guide` matches the library, optionally rewriting it in place."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 374
| 1
|
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
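

# Added usage sketch (not part of the original file): pass 0 for the one
# unknown quantity and the function returns its name and value.
if __name__ == "__main__":
    name, value = electric_conductivity(conductivity=25, electron_conc=100, mobility=0)
    assert name == "mobility"
    # sigma = n * q * mu  =>  mu = sigma / (n * q)
    assert value == 25 / (100 * ELECTRON_CHARGE)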
| 435
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
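

# Added note (assumed launch pattern, not from the original file): this script
# expects a distributed launcher to set RANK and WORLD_SIZE for each process,
# for example:
#
#   torchrun --nproc_per_node=2 this_script.py --streaming True
#
# Each rank then verifies that `split_dataset_by_node` handed it its fair
# share of the NUM_SHARDS * NUM_ITEMS_PER_SHARD examples.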
| 486
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717
|
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__a : Optional[int] = logging.getLogger()
__a : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
@require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 200
| 0
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1_024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1_024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 680
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the TensorFlow weights into our MobileNetV1 structure."""
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 217
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
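

# Added usage sketch (not in the original file): VivitConfig behaves like any
# other PretrainedConfig, so overriding a default is a plain keyword argument.
if __name__ == "__main__":
    config = VivitConfig(num_frames=16)
    assert config.num_frames == 16
    assert config.model_type == "vivit"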
| 313
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ) -> None:
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
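    # The test below compiles the forward pass with jax.jit and checks that the
    # traced outputs agree with eager (jit-disabled) execution, shape for shape.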
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
| 313
| 1
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image( image_size , device ):
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url , stream=True).raw).convert('RGB')
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711)),
        ])
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def A_( A : Any):
if "visual_encoder" in key:
UpperCamelCase = re.sub('visual_encoder*' , 'vision_model.encoder' , A)
if "blocks" in key:
UpperCamelCase = re.sub(r'blocks' , 'layers' , A)
if "attn" in key:
UpperCamelCase = re.sub(r'attn' , 'self_attn' , A)
if "norm1" in key:
UpperCamelCase = re.sub(r'norm1' , 'layer_norm1' , A)
if "norm2" in key:
UpperCamelCase = re.sub(r'norm2' , 'layer_norm2' , A)
if "encoder.norm" in key:
UpperCamelCase = re.sub(r'encoder.norm' , 'post_layernorm' , A)
if "encoder.patch_embed.proj" in key:
UpperCamelCase = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , A)
if "encoder.pos_embed" in key:
UpperCamelCase = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , A)
if "encoder.cls_token" in key:
UpperCamelCase = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , A)
if "self_attn" in key:
UpperCamelCase = re.sub(r'self_attn.proj' , 'self_attn.projection' , A)
return key
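# Illustrative example (not exercised by the script itself):
#   rename_key("visual_encoder.blocks.0.attn.qkv.weight")
#   returns "vision_model.encoder.layers.0.self_attn.qkv.weight"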
@torch.no_grad()
def convert_blip_checkpoint( pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit='base')
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size , device='cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    input_ids = tokenizer(['a picture of']).input_ids
    out = hf_model.generate(image , input_ids)
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit='base')
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question , return_tensors='pt').input_ids
    answer = hf_vqa_model.generate(question_input_ids , image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit='base')
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question , return_tensors='pt' , padding='max_length' , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True)
    out = hf_itm_model(question_input_ids , image , use_itm_head=False)
    assert out[0].item() == 0.2_110_687_494_277_954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1)[:, 1].item() == 0.45_698_845_386_505_127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')
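# Typical invocation (script name and paths illustrative):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base --config_path config.json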
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowerCAmelCase : Optional[Any] = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 3
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=1_0 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=3_7 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict['''bool_masked_pos'''] = bool_masked_pos.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        '''simple docstring'''
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict['''output_attentions'''] = True
                inputs_dict['''output_hidden_states'''] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict['''output_attentions'''] = True
                inputs_dict['''output_hidden_states'''] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def prepare_video():
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
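# The helper returns the clip as a list of per-frame numpy arrays, which is the
# input format the VideoMAEImageProcessor below accepts.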
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
    @slow
    def test_inference_for_video_classification( self ):
        '''simple docstring'''
        model = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 4_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_for_pretraining( self ):
        '''simple docstring'''
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='''pt''' ).to(torch_device )
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        inputs['''bool_masked_pos'''] = torch.load(local_path )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=torch_device )
        self.assertEqual(outputs.logits.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1E-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1E-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=False ).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_loss = torch.tensor([0.6469] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1E-4 ) )
| 255
| 0
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
a_ :int = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    """simple docstring"""
    def __init__( self, *args, **kwargs ) ->None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs )
| 243
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
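# _LazyModule (registered at the bottom of this file) defers these imports until an
# attribute is first accessed, so `import transformers` stays cheap when optional
# backends such as torch are absent.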
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243
| 1
|
import unittest
from knapsack import greedy_knapsack as kp
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_sorted( self ):
        '''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight( self ):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError , "max_weight must greater than zero." )
    def test_negative_weight_value( self ):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError , "Weight can not be negative." )
    def test_negative_profit_value( self ):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError , "Profit can not be negative." )
    def test_null_max_weight( self ):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError , "max_weight must greater than zero." )
    def test_unequal_list_length( self ):
        '''simple docstring'''
        self.assertRaisesRegex(
            ValueError , "The length of profit and weight must be same." )
if __name__ == "__main__":
unittest.main()
| 221
|
import os
def solution( filename: str = "matrix.txt" ) -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__ ), filename ) ) as in_file:
        data = in_file.read()
    grid = [[int(cell ) for cell in row.split("," )] for row in data.strip().splitlines()]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]
    for i in range(1, n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n ):
        for j in range(1, n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1] )
    return dp[-1][-1]
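# Worked example (illustrative): for the 2x2 grid [[1, 3], [2, 1]] the dp table
# becomes [[1, 4], [3, 4]], so the minimal right/down path sum is 4 (1 -> 2 -> 1).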
if __name__ == "__main__":
print(f'''{solution() = }''')
| 486
| 0
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate( *args , take_from: Optional[Union[Dict, Any]] = None , standard_warn=True , stacklevel=2 ) -> Any:
    """simple docstring"""
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                f' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = f'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
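# Hypothetical call site (names illustrative):
#   scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)
# pops the deprecated `scale` kwarg, emits a FutureWarning, and returns the old value.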
| 712
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 546
| 0
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key ,torch.tensor(np_query_key ).transpose(1 ,2 ).contiguous().view(-1 ,hidden_size ) ,)
    set_param(
        torch_layer.self_attention.value ,torch.tensor(np_value ).transpose(1 ,2 ).contiguous().view(-1 ,hidden_size ) ,)
    set_param(
        torch_layer.output.dense ,torch.tensor(np_dense ).view(-1 ,hidden_size ).contiguous().transpose(0 ,1 ) ,)
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query ,torch.tensor(np_query ).transpose(1 ,2 ).contiguous().view(-1 ,hidden_size ) ,)
    set_param(
        torch_layer.self_attention.key ,torch.tensor(np_key ).transpose(1 ,2 ).contiguous().view(-1 ,hidden_size ) ,)
    set_param(
        torch_layer.self_attention.value ,torch.tensor(np_value ).transpose(1 ,2 ).contiguous().view(-1 ,hidden_size ) ,)
    set_param(
        torch_layer.output.dense ,torch.tensor(np_dense ).view(-1 ,hidden_size ).contiguous().transpose(0 ,1 ) ,)
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm ,torch.tensor(layer_norm_1_weight ) ,torch.tensor(layer_norm_1_bias ) ,)
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights ,torch_block.attention ,hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights ,torch_block.attention ,hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm ,torch.tensor(layer_norm_2_weight ) ,torch.tensor(layer_norm_2_bias ) ,)
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense ,torch.tensor(inter_dense_weight ).transpose(0 ,1 ).contiguous() ,torch.tensor(inter_dense_bias ) ,)
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense ,torch.tensor(out_dense_weight ).transpose(0 ,1 ).contiguous() ,torch.tensor(out_dense_bias ) ,)
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings ,torch.tensor(word_embeddings ) ,)
    if isinstance(weights[3] ,tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights ,layer ,hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm ,torch.tensor(layer_norm_out_weight ) ,torch.tensor(layer_norm_out_bias ) ,)
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder ,torch.tensor(output_embed_weights ).transpose(0 ,1 ).contiguous() ,torch.tensor(output_embed_bias ) ,)
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path ,'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights ,model ,config.hidden_size )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() ,pytorch_dump_path )
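# Typical invocation (script name and paths illustrative):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl --config_file config.json --pytorch_dump_path pytorch_model.bin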
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 42
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
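    # Background for the next test: a Bark voice preset bundles three prompt arrays
    # (semantic tokens, coarse codebook tokens, fine codebook tokens), mirroring the
    # model's three generation stages.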
    def test_speaker_embeddings( self ):
        '''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len ),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , 'file.npz' )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='max_length' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 42
| 1
|
'''simple docstring'''
def multiplicative_persistence( num : int ) -> int:
    """simple docstring"""
    if not isinstance(num , int ):
        raise ValueError('''multiplicative_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence( num : int ) -> int:
    """simple docstring"""
    if not isinstance(num , int ):
        raise ValueError('''additive_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
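# Examples (illustrative): multiplicative_persistence(39) == 3 (3*9=27 -> 2*7=14 -> 1*4=4)
# and additive_persistence(199) == 3 (1+9+9=19 -> 1+9=10 -> 1+0=1).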
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
def multiplicative_persistence( num : int ) -> int:
    """simple docstring"""
    if not isinstance(num , int ):
        raise ValueError('''multiplicative_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence( num : int ) -> int:
    """simple docstring"""
    if not isinstance(num , int ):
        raise ValueError('''additive_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
import operator
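# Strand sort: repeatedly peel an already-ordered "strand" (sublist) off the
# remaining input and merge it into the growing solution list until the input
# is exhausted.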
def strand_sort( arr , reverse = False , solution = None ):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 21
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_a : Optional[Any] = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DeiTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 479
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def setUp( self ):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
    def tearDown( self ):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent( self ):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('DDPM' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , REFERENCE_CODE , overwrite_result=re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
| 443
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def get_config( self ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = MaMaaaModel(config=config ).get_decoder().to(torch_device ).eval()
        input_ids = inputs_dict['input_ids']
        attention_mask = inputs_dict['attention_mask']
        head_mask = inputs_dict['head_mask']
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )['last_hidden_state']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            'last_hidden_state'
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-2 ) )
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
        model = MaMaaaModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname ).to(torch_device )
        encoder_last_hidden_state_a = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=encoder_last_hidden_state , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': MaMaaaForConditionalGeneration,
            '''feature-extraction''': MaMaaaModel,
            '''summarization''': MaMaaaForConditionalGeneration,
            '''text2text-generation''': MaMaaaForConditionalGeneration,
            '''translation''': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp( self ):
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info['missing_keys'] , [] )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
a_ : str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : List[str] = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not self.is_encoder_decoder:
a_ : int = inputs['input_ids']
del inputs["input_ids"]
else:
a_ : str = inputs['input_ids']
a_ : str = inputs.get('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , SCREAMING_SNAKE_CASE__ )
a_ : List[str] = model.get_input_embeddings()
if not self.is_encoder_decoder:
a_ : Any = wte(SCREAMING_SNAKE_CASE__ )
else:
a_ : Optional[int] = wte(SCREAMING_SNAKE_CASE__ )
a_ : Any = wte(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
model(**SCREAMING_SNAKE_CASE__ )[0]
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs()
a_ : Union[str, Any] = input_dict['input_ids']
a_ : List[Any] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval().to(SCREAMING_SNAKE_CASE__ )
if torch_device == "cuda":
model.half()
model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
model.generate(num_beams=4 , do_sample=SCREAMING_SNAKE_CASE__ , early_stopping=SCREAMING_SNAKE_CASE__ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
UpperCAmelCase_ : int = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : Union[str, Any] = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
a_ : List[Any] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
a_ : List[Any] = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
a_ : Tuple = model(**SCREAMING_SNAKE_CASE__ )[0]
a_ : int = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# change to expected output here
a_ : Optional[Any] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
a_ : int = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE__ )
# change to intended input
a_ : List[str] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
a_ : Optional[int] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
a_ : int = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
a_ : List[str] = model(**SCREAMING_SNAKE_CASE__ )[0]
a_ : int = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# change to expected output here
a_ : Dict = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
a_ : Dict = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE__ )
a_ : str = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
a_ : List[str] = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
a_ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
a_ : Optional[int] = model.generate(
input_ids=dct['input_ids'].to(SCREAMING_SNAKE_CASE__ ) , attention_mask=dct['attention_mask'].to(SCREAMING_SNAKE_CASE__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
a_ : Any = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
a_ : int = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
assert generated == expected_en
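# A minimal, self-contained sketch of the cache-consistency pattern exercised
# above (hypothetical decoder-style model; not the HF test harness itself):
#
#     full = torch.cat([input_ids, next_tokens], dim=-1)
#     out_no_past = model(full).last_hidden_state
#     prefix = model(input_ids, use_cache=True)
#     out_past = model(next_tokens, past_key_values=prefix.past_key_values).last_hidden_state
#     assert torch.allclose(out_no_past[:, -next_tokens.shape[1] :], out_past, atol=1e-2)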
| 443
| 1
|
from __future__ import annotations
def snake_case__ ( lowerCamelCase_ ):
    # maximum sum of non-adjacent elements, tracked as a pair of running bests
    if not lowerCamelCase_:
        return 0
    max_including = lowerCamelCase_[0]
    max_excluding = 0
    for num in lowerCamelCase_[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
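# hand-checked examples for the function above:
assert snake_case__([1, 2, 4, 5, 9, 10]) == 17  # picks 2 + 5 + 10
assert snake_case__([]) == 0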
| 542
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Tuple = logging.get_logger(__name__)
class __lowercase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ['''pixel_values''']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PIL.Image.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 / 2_55 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
A : Any = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
A : Any = get_size_dict(__UpperCAmelCase )
A : List[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
A : List[Any] = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' )
A : Dict = do_resize
A : Tuple = size
A : Union[str, Any] = resample
A : Dict = do_center_crop
A : int = crop_size
A : Union[str, Any] = do_rescale
A : str = rescale_factor
A : Optional[Any] = do_normalize
A : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PIL.Image.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
A : int = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
__UpperCAmelCase , size=(size['''height'''], size['''width''']) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
A : Optional[Any] = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> List[str]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
A : Optional[int] = do_resize if do_resize is not None else self.do_resize
A : Optional[Any] = resample if resample is not None else self.resample
A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
A : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : str = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Tuple = do_normalize if do_normalize is not None else self.do_normalize
A : Optional[int] = image_mean if image_mean is not None else self.image_mean
A : Dict = image_std if image_std is not None else self.image_std
A : List[Any] = size if size is not None else self.size
A : List[Any] = get_size_dict(__UpperCAmelCase )
A : Dict = crop_size if crop_size is not None else self.crop_size
A : Tuple = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' )
A : Any = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
A : List[str] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
A : Dict = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
A : Union[str, Any] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
A : Tuple = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
A : Optional[Any] = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
A : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
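# A minimal sketch of the same rescale -> normalize -> channels-first chain on a
# raw HWC numpy image (illustration only, not the HF implementation above):
def _preprocess_sketch(image: np.ndarray, mean, std, scale: float = 1 / 255) -> np.ndarray:
    image = image.astype(np.float32) * scale  # rescale to [0, 1]
    image = (image - np.array(mean)) / np.array(std)  # normalize per channel
    return np.transpose(image, (2, 0, 1))  # HWC -> CHW, i.e. ChannelDimension.FIRST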
| 542
| 1
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    # queue used to store nodes and their rank
    queue: list[list] = []

    # for each node and its adjacency list, add them and the node's rank to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is used to simulate a max-priority queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : str = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
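# The -1 * len(v) trick above in isolation: heapq is a min-heap, so pushing
# negated ranks makes the highest-degree vertex pop first.
_h: list = []
for _node, _degree in [("a", 3), ("b", 1), ("c", 2)]:
    heapq.heappush(_h, (-_degree, _node))
assert heapq.heappop(_h) == (-3, "a")  # highest degree comes out first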
| 338
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__A : Any = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=__snake_case , output_all_encodings=__snake_case , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , __snake_case ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__A : Optional[int] = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__A : int = os.path.join(get_home_dir() , 'models' )
__A : Optional[int] = _load_vocab(__snake_case , __snake_case , __snake_case , cls=__snake_case )
    original_bort = nlp.model.BERTModel(
__snake_case , len(__snake_case ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=__snake_case , use_token_type_embed=__snake_case , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=__snake_case , use_decoder=__snake_case , )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)

    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(__snake_case ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__A : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__A : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__A : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__A : Optional[int] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__A : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__A : BertSelfAttention = layer.attention.self
__A : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
__A : Optional[int] = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
__A : List[Any] = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
__A : Optional[Any] = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
__A : Optional[Any] = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
__A : str = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
__A : BertSelfOutput = layer.attention.output
__A : Optional[Any] = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
__A : Union[str, Any] = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
__A : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
__A : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
__A : BertIntermediate = layer.intermediate
__A : Optional[int] = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
__A : str = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
__A : BertOutput = layer.output
__A : List[str] = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
__A : Union[str, Any] = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
__A : List[Any] = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
__A : Any = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__A : List[Any] = RobertaTokenizer.from_pretrained('roberta-base' )
__A : int = tokenizer.encode_plus(__snake_case )['input_ids']
# Get gluon output
__A : List[Any] = mx.nd.array([input_ids] )
__A : Optional[int] = original_bort(inputs=__snake_case , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__snake_case )
__A : List[Any] = BertModel.from_pretrained(__snake_case )
hf_bort_model.eval()
__A : Dict = tokenizer.encode_plus(__snake_case , return_tensors='pt' )
__A : List[Any] = hf_bort_model(**__snake_case )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    absolute_difference = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")

    print("Absolute difference is:", absolute_difference)
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : List[Any] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
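# The conversion above hinges on shape-checked copying between frameworks. The
# same pattern in isolation (hypothetical numpy tensors, not the script's own
# check_and_map_params):
def _copy_with_shape_check(dst, src, name):
    assert dst.shape == src.shape, f"{name}: expected {dst.shape}, got {src.shape}"
    return src.copy()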
| 338
| 1
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # a color is valid for the current vertex when no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
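# Hand-checked example for color() above: a 5-vertex graph whose only cycle has
# even length, hence it is 2-colorable.
if __name__ == "__main__":
    adjacency = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(adjacency, 2))  # [0, 1, 0, 1, 0]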
| 81
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """simple docstring"""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """simple docstring"""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__ : List[Any] =parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
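# The qkv handling above slices one fused (3 * dim, dim) projection into
# separate query/key/value weights. The same split in isolation (assumed shapes):
def _split_qkv(qkv_weight, dim):
    return qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]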
| 207
| 0
|
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
__snake_case : Any = input("Enter numbers separated by a comma:\n").strip()
__snake_case : int = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
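# A quick hand-checked example (patience_sort sorts in place and returns):
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]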
| 707
|
'''simple docstring'''
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        # make all edge weights distinct so the cheapest edge per component is unique
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind(object):
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])  # path compression
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]

            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
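# The nested UnionFind.find above uses path compression. The same idea in
# isolation (dict-based, independent of the class above):
_parent: dict = {}

def _uf_find(x):
    _parent.setdefault(x, x)
    if _parent[x] != x:
        _parent[x] = _uf_find(_parent[x])  # point directly at the root
    return _parent[x]

def _uf_union(a, b):
    _parent[_uf_find(a)] = _uf_find(b)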
| 691
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __lowercase (_UpperCAmelCase ):
def __init__( self , **A_ ) ->Optional[int]:
'''simple docstring'''
super().__init__(**A_ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(A_ )
    def __call__(self, image, candidate_labels=None, **kwargs):
        '''simple docstring'''
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        '''simple docstring'''
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
    def _forward(self, model_inputs):
        '''simple docstring'''
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        '''simple docstring'''
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box) -> Dict[str, int]:
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
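# Typical end-user call for this pipeline (the checkpoint name is one example
# of a compatible OWL-ViT model, not something this file pins down):
#
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector("image.png", candidate_labels=["cat", "remote control"])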
| 492
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def _lowercase ( ):
__lowerCAmelCase : Optional[int] = 9
__lowerCAmelCase : Dict = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
__lowerCAmelCase : str = kruskal(lowercase__ , lowercase__ )
__lowerCAmelCase : int = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowercase__ ) == sorted(lowercase__ )
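    # sorted() is applied to both sides above because Kruskal may emit the same
    # MST edges in a different order; the assertion compares edge sets, not sequences.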
| 492
| 1
|
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        transform = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(transform))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    '''simple docstring'''
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
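# make_decrypt_key above needs the determinant's inverse modulo 36. The same
# brute-force modular inverse in isolation:
def _mod_inverse(det: int, modulus: int = 36) -> int:
    det %= modulus
    for i in range(modulus):
        if (det * i) % modulus == 1:
            return i
    raise ValueError(f"{det} has no inverse modulo {modulus}")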
| 529
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue = PriorityQueue()
    queue_backward: PriorityQueue = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
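# Hand-checked example on the graphs above: from "E" to "F" the candidate paths
# are E->B->C->D->F (cost 4) and E->G->F (cost 3), so:
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3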
| 529
| 1
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
snake_case = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
snake_case = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
snake_case = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
def __UpperCAmelCase ( self : Optional[Any] ,__A : Optional[Any] ,__A : List[str] ,__A : List[str]=None ,__A : List[Any]=None ,__A : Dict=None ,__A : str=None ,__A : int="auto" ,__A : List[str]=-1 ,__A : Union[str, Any]=0.9 ,__A : Dict=5 ,__A : Optional[Any]=500 ,__A : int="gpt2-large" ,__A : List[Any]=-1 ,__A : List[Any]=1024 ,__A : Tuple=25 ,__A : Optional[Any]=5 ,__A : int=True ,__A : int=25 ,) -> List[str]:
        out = compute_mauve(
p_text=__A ,q_text=__A ,p_features=__A ,q_features=__A ,p_tokens=__A ,q_tokens=__A ,num_buckets=__A ,pca_max_data=__A ,kmeans_explained_var=__A ,kmeans_num_redo=__A ,kmeans_max_iter=__A ,featurize_model_name=__A ,device_id=__A ,max_text_length=__A ,divergence_curve_discretization_size=__A ,mauve_scaling_factor=__A ,verbose=__A ,seed=__A ,)
return out
| 67
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowerCAmelCase :List[str] = 16
_lowerCAmelCase :Any = 32
def bamb(x: int) -> int:
    # convert bytes to megabytes
    return int(x / 2**20)
class _UpperCAmelCase :
'''simple docstring'''
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Optional[int] = torch.cuda.memory_allocated()
return self
def __exit__( self , *A ) -> Any:
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : Optional[int] = torch.cuda.memory_allocated()
_UpperCAmelCase : int = torch.cuda.max_memory_allocated()
_UpperCAmelCase : str = bamb(self.end - self.begin )
_UpperCAmelCase : Any = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(UpperCamelCase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(UpperCamelCase__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train: {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
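
# A minimal launch sketch (the DeepSpeed config filename and the script filename
# below are hypothetical placeholders; the flags come from the argparse setup above):
#
#   accelerate launch --config_file deepspeed_config.yaml peak_memory_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 1 --peak_memory_upper_bound 4000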
| 506
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
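
# A minimal usage sketch (assumes the checkpoint above can be downloaded; the
# image filename is a hypothetical example):
#
#   from PIL import Image
#   segmenter = ImageSegmentationTool()
#   mask = segmenter(image=Image.open("cat.png"), label="cat")  # returns the PIL mask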
| 287
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
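
# With this structure in place, heavy submodules are only imported on first
# attribute access, e.g. (a sketch, assuming torch is installed):
#
#   from transformers.models.vit_msn import ViTMSNModel  # resolved lazily via _LazyModule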
| 287
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pix2pix_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
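
# These tests follow the standard diffusers layout; running only the fast tests
# might look like this (the file path is a hypothetical example):
#
#   python -m pytest tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py -k "FastTests"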
| 57
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """Construct an MBART tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=None , A_=None , A_=None , A_ = None , A_=None , **A_ , )-> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
_SCREAMING_SNAKE_CASE = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , tokenizer_file=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
_SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_SCREAMING_SNAKE_CASE = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = len(self.sp_model )
_SCREAMING_SNAKE_CASE = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A_ )
}
_SCREAMING_SNAKE_CASE = {v: k for k, v in self.lang_code_to_id.items()}
_SCREAMING_SNAKE_CASE = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_SCREAMING_SNAKE_CASE = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else 'en_XX'
_SCREAMING_SNAKE_CASE = self.lang_code_to_id[self._src_lang]
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
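
# A minimal usage sketch (network access to the Hub is assumed):
#
#   tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # per set_src_lang_special_tokens above, input_ids end with [..., eos_token_id, en_XX language code]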
| 605
| 0
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 341
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 341
| 1
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
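
# Sketch of the early-exit training contract (the checkpoint name is a
# hypothetical example; `input_ids` / `labels` are assumed tensors):
#
#   model = DeeRobertaForSequenceClassification.from_pretrained("roberta-base")
#   outputs = model(input_ids, labels=labels, train_highway=True)  # loss summed over highway exits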
| 54
|
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
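
# Note on the division branch above: it truncates toward zero rather than
# flooring, matching C-style integer division. For example:
#
#   print(evaluate_postfix(["-7", "2", "/"]))  # -3, whereas -7 // 2 == -4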
| 605
| 0
|
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance,
):
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source, destination, graph_forward, graph_backward):
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
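
# Example query on the module-level graphs above; E -> G -> F (cost 2 + 1) is
# the cheapest route:
#
#   print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3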
| 352
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowercase_ = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
lowercase_ = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
lowercase_ = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 352
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Any ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = []
UpperCamelCase_ : Tuple = ''
UpperCamelCase_ : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
UpperCamelCase_ : Optional[Any] = True
UpperCamelCase_ : List[str] = []
else:
current_sub_tokens.append(snake_case )
UpperCamelCase_ : Optional[Any] = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def __getstate__( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : str = self.__dict__.copy()
UpperCamelCase_ : Union[str, Any] = None
return state
def __setstate__( self : Any , snake_case : Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase_ : List[Any] = {}
UpperCamelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase_ : Dict = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
UpperCamelCase_ : str = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 417
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config( checkpoint_url ):
    config = SwinaSRConfig()
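    # every released checkpoint only overrides the non-default attributes below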
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase_ : Union[str, Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCamelCase_ : Union[str, Any] = 4
UpperCamelCase_ : Union[str, Any] = 48
UpperCamelCase_ : List[Any] = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase_ : Union[str, Any] = [6, 6, 6, 6]
UpperCamelCase_ : Union[str, Any] = 60
UpperCamelCase_ : List[str] = [6, 6, 6, 6]
UpperCamelCase_ : Tuple = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase_ : Optional[int] = 4
UpperCamelCase_ : Dict = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCamelCase_ : Any = 1
UpperCamelCase_ : List[Any] = 1
UpperCamelCase_ : List[str] = 126
UpperCamelCase_ : Dict = 7
UpperCamelCase_ : int = 2_5_5.0
UpperCamelCase_ : str = ''
return config
def rename_key( name , config ):
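    # map an original Swin2SR state-dict key onto the Transformers naming scheme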
if "patch_embed.proj" in name and "layers" not in name:
UpperCamelCase_ : Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase_ : Tuple = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
UpperCamelCase_ : List[str] = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
UpperCamelCase_ : Optional[Any] = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
UpperCamelCase_ : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase_ : Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase_ : List[str] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase_ : Optional[Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase_ : List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase_ : Optional[Any] = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
UpperCamelCase_ : List[str] = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
UpperCamelCase_ : str = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
UpperCamelCase_ : Tuple = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
UpperCamelCase_ : Optional[int] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
UpperCamelCase_ : Union[str, Any] = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
UpperCamelCase_ : Dict = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase_ : List[str] = 'layernorm.bias'
if "conv_first" in name:
UpperCamelCase_ : Any = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCamelCase_ : int = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCamelCase_ : Union[str, Any] = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
UpperCamelCase_ : Optional[Any] = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
UpperCamelCase_ : Optional[Any] = name.replace('upsample.2' , 'upsample.convolution_1' )
UpperCamelCase_ : Dict = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
UpperCamelCase_ : List[Any] = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
UpperCamelCase_ : List[Any] = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
UpperCamelCase_ : Union[str, Any] = 'swin2sr.' + name
return name
def convert_state_dict( orig_state_dict , config ):
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
            key_split = key.split('.' )
UpperCamelCase_ : Any = int(key_split[1] )
UpperCamelCase_ : Union[str, Any] = int(key_split[4] )
            dim = config.embed_dim
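            # split the fused qkv projection into separate query, key and value tensors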
if "weight" in key:
UpperCamelCase_ : str = val[:dim, :]
UpperCamelCase_ : Optional[int] = val[dim : dim * 2, :]
UpperCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
UpperCamelCase_ : Dict = val[:dim]
UpperCamelCase_ : Tuple = val[dim : dim * 2]
UpperCamelCase_ : str = val[-dim:]
pass
else:
            orig_state_dict[rename_key(key , config )] = val
return orig_state_dict
def convert_swinasr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
for key in unexpected_keys:
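        # relative position tables and attention masks are buffers the model recomputes, so they are safe to skip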
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"Unexpected key {key} in state_dict" )
# verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCamelCase_ : int = torch.Size([1, 3, 512, 512] )
UpperCamelCase_ : List[str] = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase_ : str = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase_ : Optional[Any] = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCamelCase_ : List[str] = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase_ : Tuple = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase_ : Any = torch.Size([1, 3, 512, 512] )
UpperCamelCase_ : Tuple = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase_ : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase_ : Tuple = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub(F"caidas/{model_name}" )
processor.push_to_hub(F"caidas/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
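    # Example invocation (script and output folder names are illustrative):
    #   python convert_swin2sr.py --checkpoint_url <release_url> --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64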
| 417
| 1
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase : Tuple = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy( saved_model_path , strict , opset ) -> Any:
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
        onnx_opsets = json.load(f )['opsets']
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , 'rb' ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + '\n'.join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f'Found the following incompatible ops for the opset {opset}:' )
        print(*incompatible_ops , sep='\n' )
    else:
        print(f'The saved model {saved_model_path} can properly be converted with ONNX.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=1_2, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
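    # Example invocation (script and model paths are illustrative):
    #   python check_tf_ops.py --saved_model_path ./saved_model.pb --opset 12 --strict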
| 709
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "efficientformer"
def __init__( self , __UpperCAmelCase = [3, 2, 6, 4] , __UpperCAmelCase = [48, 96, 224, 448] , __UpperCAmelCase = [True, True, True, True] , __UpperCAmelCase = 448 , __UpperCAmelCase = 32 , __UpperCAmelCase = 4 , __UpperCAmelCase = 7 , __UpperCAmelCase = 5 , __UpperCAmelCase = 8 , __UpperCAmelCase = 4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 16 , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = 2 , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 1 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = 1E-5 , __UpperCAmelCase = "gelu" , __UpperCAmelCase = 0.0_2 , __UpperCAmelCase = 1E-12 , __UpperCAmelCase = 224 , __UpperCAmelCase = 1E-05 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = hidden_sizes
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = depths
__UpperCamelCase = mlp_expansion_ratio
__UpperCamelCase = downsamples
__UpperCamelCase = dim
__UpperCamelCase = key_dim
__UpperCamelCase = attention_ratio
__UpperCamelCase = resolution
__UpperCamelCase = pool_size
__UpperCamelCase = downsample_patch_size
__UpperCamelCase = downsample_stride
__UpperCamelCase = downsample_pad
__UpperCamelCase = drop_path_rate
__UpperCamelCase = num_metaad_blocks
__UpperCamelCase = distillation
__UpperCamelCase = use_layer_scale
__UpperCamelCase = layer_scale_init_value
__UpperCamelCase = image_size
__UpperCamelCase = batch_norm_eps
| 293
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 575
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["pixel_values"]
def __init__( self : Dict , lowercase__ : bool = True , lowercase__ : Dict[str, int] = None , lowercase__ : int = 0.9 , lowercase__ : PILImageResampling = PILImageResampling.BICUBIC , lowercase__ : bool = True , lowercase__ : Dict[str, int] = None , lowercase__ : Union[int, float] = 1 / 2_5_5 , lowercase__ : bool = True , lowercase__ : bool = True , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[float, List[float]]] = None , **lowercase__ : Dict , ):
super().__init__(**lowercase__ )
__lowercase : List[str] = size if size is not None else {"shortest_edge": 2_2_4}
__lowercase : int = get_size_dict(lowercase__ , default_to_square=lowercase__ )
__lowercase : Tuple = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowercase : str = get_size_dict(lowercase__ , param_name="crop_size" )
__lowercase : Tuple = do_resize
__lowercase : Tuple = size
__lowercase : Any = crop_pct
__lowercase : List[str] = resample
__lowercase : str = do_center_crop
__lowercase : int = crop_size
__lowercase : List[str] = do_rescale
__lowercase : List[Any] = rescale_factor
__lowercase : Union[str, Any] = do_normalize
__lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowercase : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case ( self : Any , lowercase__ : np.ndarray , lowercase__ : Dict[str, int] , lowercase__ : Optional[float] = None , lowercase__ : PILImageResampling = PILImageResampling.BICUBIC , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Any , ):
__lowercase : List[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
__lowercase : Any = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__lowercase : Tuple = int(size["height"] / crop_pct )
else:
__lowercase : Optional[int] = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(lowercase__ ) )
__lowercase : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
__lowercase : Any = get_resize_output_image_size(lowercase__ , size=size["shortest_edge"] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
__lowercase : List[Any] = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def snake_case ( self : Optional[int] , lowercase__ : np.ndarray , lowercase__ : Dict[str, int] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Union[str, Any] , ):
__lowercase : str = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["height"], size["width"]) , data_format=lowercase__ , **lowercase__ )
def snake_case ( self : List[str] , lowercase__ : np.ndarray , lowercase__ : Union[int, float] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Tuple , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def snake_case ( self : Union[str, Any] , lowercase__ : np.ndarray , lowercase__ : Union[float, List[float]] , lowercase__ : Union[float, List[float]] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : List[str] , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def snake_case ( self : Dict , lowercase__ : ImageInput , lowercase__ : bool = None , lowercase__ : Dict[str, int] = None , lowercase__ : int = None , lowercase__ : PILImageResampling = None , lowercase__ : bool = None , lowercase__ : Dict[str, int] = None , lowercase__ : bool = None , lowercase__ : float = None , lowercase__ : bool = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : ChannelDimension = ChannelDimension.FIRST , **lowercase__ : int , ):
__lowercase : str = do_resize if do_resize is not None else self.do_resize
__lowercase : Any = crop_pct if crop_pct is not None else self.crop_pct
__lowercase : List[str] = resample if resample is not None else self.resample
__lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : Dict = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Any = image_mean if image_mean is not None else self.image_mean
__lowercase : int = image_std if image_std is not None else self.image_std
__lowercase : Optional[int] = size if size is not None else self.size
__lowercase : List[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
__lowercase : List[str] = crop_size if crop_size is not None else self.crop_size
__lowercase : List[str] = get_size_dict(lowercase__ , param_name="crop_size" )
__lowercase : Optional[int] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__lowercase : Optional[Any] = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__lowercase : List[str] = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
__lowercase : Any = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
__lowercase : Dict = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
__lowercase : Tuple = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
__lowercase : Any = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
__lowercase : Optional[int] = {"pixel_values": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 575
| 1
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert( model , tf_checkpoint_path , config ) -> List[Any]:
    '''simple docstring'''
    tf_path = os.path.abspath(tf_checkpoint_path )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case_ = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
            name = name[1:]
# figure out how many levels deep the name is
        depth = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
        layer_depth.append(depth )
# read data
        array = tf.train.load_variable(tf_path , full_name )
        names.append("""/""".join(name ) )
        arrays.append(array )
    logger.info(f'''Read a total of {len(arrays ):,} layers''' )
# Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(f'''Found layer names with different depths (layer depth {list(set(layer_depth ) )})''' )
    layer_depth = list(set(layer_depth ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
    for full_name, array in zip(names , arrays ):
        name = full_name.split("""/""" )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
snake_case_ = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """embeddings""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
snake_case_ = getattr(lowercase_ , """encoder""" )
snake_case_ = getattr(lowercase_ , """layer""" )
snake_case_ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
snake_case_ = getattr(lowercase_ , """pooler""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
snake_case_ = getattr(lowercase_ , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
snake_case_ = getattr(lowercase_ , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
snake_case_ = getattr(lowercase_ , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
snake_case_ = getattr(lowercase_ , """token_type_embeddings""" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
snake_case_ = getattr(lowercase_ , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
snake_case_ = getattr(lowercase_ , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
snake_case_ = getattr(lowercase_ , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
snake_case_ = getattr(lowercase_ , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
snake_case_ = getattr(lowercase_ , """intermediate""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
snake_case_ = getattr(lowercase_ , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
snake_case_ = getattr(lowercase_ , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
snake_case_ = getattr(lowercase_ , """weight""" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
snake_case_ = """.""".join(lowercase_ )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowercase_ ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , lowercase_ ):
snake_case_ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case_ = array.transpose()
if pointer.shape == array.shape:
snake_case_ = torch.from_numpy(lowercase_ )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def convert_tfa_checkpoint_to_pytorch( tf_checkpoint_path , config_path , pytorch_dump_path ) -> Union[str, Any]:
    '''simple docstring'''
    logger.info(f'''Loading model based on config from {config_path}...''' )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
    load_tfa_weights_in_bert(model , tf_checkpoint_path , config )
    # Save pytorch-model
    logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 161
|
def solution( pence = 200 ) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
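    # e.g. 5 pence can be made in 4 ways: 5, 2+2+1, 2+1+1+1, 1+1+1+1+1
    assert solution(5) == 4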
| 161
| 1
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : int = DownBlockaD # noqa F405
__snake_case : Union[str, Any] = "down"
def A__ ( self ):
_A : Optional[int] = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : int = ResnetDownsampleBlockaD # noqa F405
__snake_case : Optional[int] = "down"
def A__ ( self ):
_A : Union[str, Any] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : str = AttnDownBlockaD # noqa F405
__snake_case : Union[str, Any] = "down"
def A__ ( self ):
_A : Union[str, Any] = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : List[Any] = CrossAttnDownBlockaD # noqa F405
__snake_case : Optional[int] = "down"
def A__ ( self ):
_A , _A : int = super().prepare_init_args_and_inputs_for_common()
_A : Optional[Any] = 32
return init_dict, inputs_dict
def A__ ( self ):
_A : Dict = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : str = SimpleCrossAttnDownBlockaD # noqa F405
__snake_case : List[Any] = "down"
@property
def A__ ( self ):
return super().get_dummy_input(include_encoder_hidden_states=A__ )
def A__ ( self ):
_A , _A : Optional[int] = super().prepare_init_args_and_inputs_for_common()
_A : Optional[int] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' )
def A__ ( self ):
_A : Optional[Any] = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : List[Any] = SkipDownBlockaD # noqa F405
__snake_case : Union[str, Any] = "down"
@property
def A__ ( self ):
return super().get_dummy_input(include_skip_sample=A__ )
def A__ ( self ):
_A : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Union[str, Any] = AttnSkipDownBlockaD # noqa F405
__snake_case : Optional[int] = "down"
@property
def A__ ( self ):
return super().get_dummy_input(include_skip_sample=A__ )
def A__ ( self ):
_A : List[str] = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : int = DownEncoderBlockaD # noqa F405
__snake_case : Any = "down"
@property
def A__ ( self ):
return super().get_dummy_input(include_temb=A__ )
def A__ ( self ):
_A : Any = {
'''in_channels''': 32,
'''out_channels''': 32,
}
_A : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ):
_A : Any = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : List[Any] = AttnDownEncoderBlockaD # noqa F405
__snake_case : Tuple = "down"
@property
def A__ ( self ):
return super().get_dummy_input(include_temb=A__ )
def A__ ( self ):
_A : Tuple = {
'''in_channels''': 32,
'''out_channels''': 32,
}
_A : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ):
_A : Optional[Any] = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Optional[int] = UNetMidBlockaD # noqa F405
__snake_case : str = "mid"
def A__ ( self ):
_A : Tuple = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
_A : Any = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ):
_A : str = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : str = UNetMidBlockaDCrossAttn # noqa F405
__snake_case : Optional[int] = "mid"
def A__ ( self ):
_A , _A : Tuple = super().prepare_init_args_and_inputs_for_common()
_A : Optional[Any] = 32
return init_dict, inputs_dict
def A__ ( self ):
_A : int = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Dict = UNetMidBlockaDSimpleCrossAttn # noqa F405
__snake_case : List[Any] = "mid"
@property
def A__ ( self ):
return super().get_dummy_input(include_encoder_hidden_states=A__ )
def A__ ( self ):
_A , _A : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
_A : Tuple = 32
return init_dict, inputs_dict
def A__ ( self ):
_A : Tuple = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Optional[int] = UpBlockaD # noqa F405
__snake_case : Dict = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=A__ )
def A__ ( self ):
_A : Tuple = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Union[str, Any] = ResnetUpsampleBlockaD # noqa F405
__snake_case : Any = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=A__ )
def A__ ( self ):
_A : Tuple = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Optional[int] = CrossAttnUpBlockaD # noqa F405
__snake_case : Union[str, Any] = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=A__ )
def A__ ( self ):
_A , _A : Optional[int] = super().prepare_init_args_and_inputs_for_common()
_A : str = 32
return init_dict, inputs_dict
def A__ ( self ):
_A : Optional[int] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : List[str] = SimpleCrossAttnUpBlockaD # noqa F405
__snake_case : Optional[Any] = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=A__ ,include_encoder_hidden_states=A__ )
def A__ ( self ):
_A , _A : Dict = super().prepare_init_args_and_inputs_for_common()
_A : Optional[Any] = 32
return init_dict, inputs_dict
def A__ ( self ):
_A : Optional[int] = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Tuple = AttnUpBlockaD # noqa F405
__snake_case : Optional[int] = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=A__ )
@unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' )
def A__ ( self ):
_A : int = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : int = SkipUpBlockaD # noqa F405
__snake_case : str = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=A__ )
def A__ ( self ):
_A : Any = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Tuple = AttnSkipUpBlockaD # noqa F405
__snake_case : List[str] = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=A__ )
def A__ ( self ):
_A : Optional[int] = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : str = UpDecoderBlockaD # noqa F405
__snake_case : Union[str, Any] = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_temb=A__ )
def A__ ( self ):
_A : List[str] = {'''in_channels''': 32, '''out_channels''': 32}
_A : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ):
_A : List[Any] = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(A__ )
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Any = AttnUpDecoderBlockaD # noqa F405
__snake_case : int = "up"
@property
def A__ ( self ):
return super().get_dummy_input(include_temb=A__ )
def A__ ( self ):
_A : List[Any] = {'''in_channels''': 32, '''out_channels''': 32}
_A : str = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ):
_A : Optional[int] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(A__ )
| 206
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Dict =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_UpperCamelCase : Optional[Any] =256047
_UpperCamelCase : int =256145
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
__snake_case : Any = NllbTokenizer
__snake_case : Dict = NllbTokenizerFast
__snake_case : List[Any] = True
__snake_case : Any = True
__snake_case : List[str] = {}
def A__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_A : Any = NllbTokenizer(A__ ,keep_accents=A__ )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self ):
_A : Dict = NllbTokenizer(A__ ,keep_accents=A__ )
_A : Any = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A__ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
_A : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
_A : int = tokenizer.convert_tokens_to_ids(A__ )
self.assertListEqual(
A__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
_A : Optional[int] = tokenizer.convert_ids_to_tokens(A__ )
self.assertListEqual(
A__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def A__ ( self ):
_A : Any = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A : List[str] = self.rust_tokenizer_class.from_pretrained(A__ ,**A__ )
_A : Tuple = self.tokenizer_class.from_pretrained(A__ ,**A__ )
_A : Tuple = tempfile.mkdtemp()
_A : List[Any] = tokenizer_r.save_pretrained(A__ )
_A : Tuple = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_A : Dict = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(A__ ,A__ )
# Checks everything loads correctly in the same way
_A : Optional[int] = tokenizer_r.from_pretrained(A__ )
_A : Optional[Any] = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ ,A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=True
_A : List[Any] = tempfile.mkdtemp()
_A : List[Any] = tokenizer_r.save_pretrained(A__ ,legacy_format=A__ )
_A : List[Any] = tokenizer_p.save_pretrained(A__ )
# Checks it save with the same files
self.assertSequenceEqual(A__ ,A__ )
# Checks everything loads correctly in the same way
_A : List[Any] = tokenizer_r.from_pretrained(A__ )
_A : int = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ ,A__ ) )
shutil.rmtree(A__ )
# Save tokenizer rust, legacy_format=False
_A : int = tempfile.mkdtemp()
_A : List[Any] = tokenizer_r.save_pretrained(A__ ,legacy_format=A__ )
_A : Dict = tokenizer_p.save_pretrained(A__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_A : Optional[int] = tokenizer_r.from_pretrained(A__ )
_A : str = tokenizer_p.from_pretrained(A__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A__ ,A__ ) )
shutil.rmtree(A__ )
@require_torch
def A__ ( self ):
if not self.test_seqaseq:
return
_A : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
_A : Union[str, Any] = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
_A : Tuple = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
_A : List[Any] = tokenizer.prepare_seqaseq_batch(
src_texts=A__ ,tgt_texts=A__ ,max_length=3 ,max_target_length=10 ,return_tensors='''pt''' ,src_lang='''eng_Latn''' ,tgt_lang='''ron_Latn''' ,)
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,10 )
# max_target_length will default to max_length if not specified
_A : int = tokenizer.prepare_seqaseq_batch(
A__ ,tgt_texts=A__ ,max_length=3 ,return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,3 )
_A : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=A__ ,max_length=3 ,max_target_length=10 ,return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 )
self.assertNotIn('''decoder_input_ids''' ,A__ )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def A__ ( self ):
pass
def A__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A : List[str] = [AddedToken('''<special>''' ,lstrip=A__ )]
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
A__ ,additional_special_tokens=A__ ,**A__ )
_A : List[str] = tokenizer_r.encode('''Hey this is a <special> token''' )
_A : List[Any] = tokenizer_r.encode('''<special>''' ,add_special_tokens=A__ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
_A : Dict = self.rust_tokenizer_class.from_pretrained(
A__ ,additional_special_tokens=A__ ,**A__ ,)
_A : List[Any] = self.tokenizer_class.from_pretrained(
A__ ,additional_special_tokens=A__ ,**A__ )
_A : int = tokenizer_p.encode('''Hey this is a <special> token''' )
_A : Optional[int] = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(A__ ,A__ )
self.assertEqual(A__ ,A__ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
__snake_case : Union[str, Any] = "facebook/nllb-200-distilled-600M"
__snake_case : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__snake_case : List[str] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__snake_case : int = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def A__ ( cls ):
_A : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang='''eng_Latn''' ,tgt_lang='''ron_Latn''' )
_A : List[str] = 1
return cls
def A__ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] ,256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] ,256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] ,256057 )
def A__ ( self ):
_A : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,A__ )
def A__ ( self ):
self.assertIn(A__ ,self.tokenizer.all_special_ids )
# fmt: off
_A : Dict = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
_A : Any = self.tokenizer.decode(A__ ,skip_special_tokens=A__ )
_A : Any = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=A__ )
self.assertEqual(A__ ,A__ )
self.assertNotIn(self.tokenizer.eos_token ,A__ )
def A__ ( self ):
_A : int = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] ,A__ )
_A : Tuple = 10
_A : Optional[Any] = self.tokenizer(A__ ,max_length=A__ ,truncation=A__ ).input_ids[0]
self.assertEqual(ids[-1] ,2 )
self.assertEqual(ids[0] ,A__ )
self.assertEqual(len(A__ ) ,A__ )
def A__ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) ,[256203, 3] )
def A__ ( self ):
_A : str = tempfile.mkdtemp()
_A : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A__ )
_A : Any = NllbTokenizer.from_pretrained(A__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,A__ )
@require_torch
def A__ ( self ):
_A : Optional[int] = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=A__ ,truncation=A__ ,max_length=len(self.expected_src_tokens ) ,return_tensors='''pt''' ,)
_A : str = shift_tokens_right(
batch['''labels'''] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(A__ ,A__ )
self.assertEqual((2, 15) ,batch.input_ids.shape )
self.assertEqual((2, 15) ,batch.attention_mask.shape )
_A : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,A__ )
self.assertEqual(A__ ,batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def A__ ( self ):
_A : str = self.tokenizer(self.src_text ,padding=A__ ,truncation=A__ ,max_length=3 ,return_tensors='''pt''' )
_A : Tuple = self.tokenizer(
text_target=self.tgt_text ,padding=A__ ,truncation=A__ ,max_length=10 ,return_tensors='''pt''' )
_A : int = targets['''input_ids''']
_A : Dict = shift_tokens_right(
A__ ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,)
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def A__ ( self ):
_A : str = self.tokenizer._build_translation_inputs(
'''A test''' ,return_tensors='''pt''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(A__ ) ,{
# A, test, EOS, en_XX
'''input_ids''': [[256047, 70, 7356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 256057,
} ,)
@require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047])
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn")
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2])
| 206
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='facebook/xglm-564M', padding=False, )
| 585
|
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of ``lst`` (elements assumed distinct)."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
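# Added illustrative note (not in the original module): because the partition
# drops elements equal to the pivot, the function assumes distinct values.
# A worked example:
#
#     kth_number([9, 2, 7, 4, 6], 3)  # -> 6, the 3rd smallest element
#
# Each recursive call keeps only one side of the partition, so the expected
# running time is linear in len(lst).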
if __name__ == "__main__":
import doctest
doctest.testmod()
| 585
| 1
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of ``sequence`` using backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Iterate through each branch of the state space tree using DFS; terminates
    when it reaches the end of the given sequence."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
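# Added illustrative note (not part of the original script): each call prints
# every permutation, one list per line. For sequence_a = ["A", "B", "C"] that
# is 3! = 6 lines, beginning with ['A', 'B', 'C'] and then ['A', 'C', 'B'].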
| 35
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=50400 , n_positions=2048 , n_ctx=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , tie_word_embeddings=False , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ) -> None:
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
return common_inputs
@property
    def num_layers( self ) -> int:
        '''simple docstring'''
        return self._config.n_layer
@property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
return ordered_inputs
@property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
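# Illustrative usage sketch (an addition, with hypothetical values): the two
# classes above can be combined to describe an ONNX export.
#
#     config = CodeGenConfig(n_layer=2, n_head=4)
#     onnx_config = CodeGenOnnxConfig(config, task="default")
#     print(onnx_config.inputs)  # OrderedDict with "input_ids" and "attention_mask"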
| 288
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
    def test_add_special_tokens(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
        performance_metric[f'''epoch-{epoch}'''] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", )
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
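# Example invocation (illustrative; the script file name here is an assumption):
#   accelerate launch test_performance.py --model_name_or_path bert-base-cased \
#       --num_epochs 3 --output_dir .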
| 668
| 1
|
'''simple docstring'''
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True if the string is a dotted-quad IPv4 address (four numeric octets)."""
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
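# Added illustrative examples (note this validator caps each octet at 254,
# not the usual 255):
#   is_ip_va_address_valid("192.168.0.23")  -> True
#   is_ip_va_address_valid("192.256.15.8")  -> False  (octet out of range)
#   is_ip_va_address_valid("172.100.0")     -> False  (too few octets)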
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input().strip()
SCREAMING_SNAKE_CASE__ = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 267
|
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case (unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self) -> None:
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self) -> None:
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
| 267
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    sample: torch.FloatTensor
class UNetaDModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__( self , sample_size = 6_5536 , sample_rate = None , in_channels = 2 , out_channels = 2 , extra_in_channels = 0 , time_embedding_type = "fourier" , flip_sin_to_cos = True , use_timestep_embedding = False , freq_shift = 0.0 , down_block_types = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type = "UNetMidBlock1D" , out_block_type = None , block_out_channels = (32, 32, 64) , act_fn = None , norm_num_groups = 8 , layers_per_block = 1 , downsample_each_block = False , ) -> None:
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], )
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )
    def forward( self , sample , timestep , return_dict = True , ) -> Union[UNetaDOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample)
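# Illustrative usage sketch (an addition; shapes follow the default config above):
# one denoising call on audio of length ``sample_size``.
#
#     model = UNetaDModel()
#     noisy_sample = torch.randn(1, 2, 6_5536)
#     output = model(noisy_sample, timestep=10).sample  # same shape as the input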
| 54
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self) -> None:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="""fill-mask""", model="""sshleifer/tiny-distilroberta-base""", top_k=2, framework="""tf""")
        outputs = unmasker("""My name is <mask>""")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
        outputs = unmasker("""The largest city in France is <mask>""")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
        outputs = unmasker("""My name is <mask>""", targets=[""" Patrick""", """ Clara""", """ Teven"""], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="""fill-mask""", model="""sshleifer/tiny-distilroberta-base""", top_k=2, framework="""pt""")
        outputs = unmasker("""My name is <mask>""")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
        outputs = unmasker("""The largest city in France is <mask>""")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
        outputs = unmasker("""My name is <mask>""", targets=[""" Patrick""", """ Clara""", """ Teven"""], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
        outputs = unmasker("""My name is <mask> <mask>""", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("""fill-mask""", model="""hf-internal-testing/tiny-random-distilbert""", device=0, framework="""pt""")
        # convert model to fp16
        pipe.model.half()
        outputs = pipe("""Paris is the [MASK] of France.""")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="""fill-mask""", model="""distilroberta-base""", top_k=2, framework="""pt""")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="""fill-mask""", model="""distilroberta-base""", top_k=2, framework="""tf""")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker) -> None:
        outputs = unmasker("""My name is <mask>""")
        self.assertEqual(
            nested_simplify(outputs), [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
        outputs = unmasker("""The largest city in France is <mask>""")
        self.assertEqual(
            nested_simplify(outputs), [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
        outputs = unmasker("""My name is <mask>""", targets=[""" Patrick""", """ Clara""", """ Teven"""], top_k=3)
        self.assertEqual(
            nested_simplify(outputs), [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="""fill-mask""", model="""sshleifer/tiny-distilroberta-base""", framework="""pt""")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="""fill-mask""", model="""sshleifer/tiny-distilroberta-base""", framework="""tf""")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            F'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            F'''This is a {tokenizer.mask_token}''', )
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ], )
        outputs = fill_masker([F'''This is a {tokenizer.mask_token}'''])
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ], )
        outputs = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''])
        self.assertEqual(
            outputs, [
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
            ], )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("""This is""")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(F'''This is a {tokenizer.mask_token}''')
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ], )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(F'''This is a {tokenizer.mask_token}''', targets=targets)
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ], )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(F'''This is a {tokenizer.mask_token}''', targets=targets)
        tokens = [top_mask["""token_str"""] for top_mask in outputs]
        scores = [top_mask["""score"""] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(F'''This is a {tokenizer.mask_token}''', targets=tokens)
            target_scores = [top_mask["""score"""] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(F'''This is a {tokenizer.mask_token}''', targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(F'''This is a {tokenizer.mask_token}''', targets=[""""""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(F'''This is a {tokenizer.mask_token}''', targets="""""")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(F'''This is a {tokenizer.mask_token}''')
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ], )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs_2 = fill_masker(F'''This is a {tokenizer.mask_token}''', top_k=2)
        self.assertEqual(
            outputs_2, [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ], )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs_2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(F'''This is a {tokenizer.mask_token}''', top_k=2, targets=targets)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets_2 = [el["""token_str"""] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets_2).issubset(targets):
            outputs_2 = fill_masker(F'''This is a {tokenizer.mask_token}''', top_k=3, targets=targets_2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs_2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(F'''My name is {tokenizer.mask_token}''', targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''', top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
            ], )
| 54
| 1
|
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class snake_case ( unittest.TestCase ):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['''en-ru''', 2_6.0],
['''ru-en''', 2_2.0],
['''en-de''', 2_2.0],
['''de-en''', 2_9.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]['''src''']
        tgt_sentences = bleu_data[pair]['''tgt''']
        batch = tokenizer(src_sentences, return_tensors='''pt''', truncation=True, padding='''longest''').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['''bleu'''], min_bleu_score)
| 674
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """Graph on ``num_of_nodes`` nodes, storing edges as [u, v, weight]."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge in the format [first, second, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        """Return the component index a node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        """Propagate a new component index throughout the component map."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the smaller component into the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        """Perform Boruvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''')
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'''The total weight of the minimal spanning tree is: {mst_weight}''')
def lowerCAmelCase_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
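# Minimal usage sketch for the class above. The graph below is illustrative;
# with these edges Boruvka's algorithm selects 0-1, 1-2, 2-3 and 3-4, for a
# total minimum-spanning-tree weight of 6.
if __name__ == "__main__":
    g = snake_case(5)
    for u_node, v_node, weight in [(0, 1, 1), (0, 2, 3), (1, 2, 2), (2, 3, 2), (3, 4, 1)]:
        g.add_edge(u_node, v_node, weight)
    g.boruvka()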
| 674
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
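# Hedged usage sketch (class/method names follow the restored code above):
# compose a full config from the two sub-configs and round-trip it via to_dict().
#
#     text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=2)
#     vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
#     cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert cfg.to_dict()["text_config"]["num_layers"] == 2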
| 715
|
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
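# Quick sanity sketch beyond the built-in test: power iteration converges to the
# dominant eigenpair, so for a diagonal matrix the answer is immediate
# (illustrative values chosen here, not part of the original test suite):
#
#     >>> lam, vec = power_iteration(np.diag([1.0, 5.0]), np.array([1.0, 1.0]))
#     >>> round(float(lam), 6)
#     5.0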
| 216
| 0
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ) -> None:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
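# Hedged usage sketch for the writer above (`Dataset.from_dict` is public
# `datasets` API; class/method names follow the code as restored here, not
# necessarily a given installed library version):
#
#     from io import BytesIO
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"a": [1, 2], "b": ["x", "y"]})
#     buf = BytesIO()
#     JsonDatasetWriter(ds, buf, batch_size=2).write()
#     print(buf.getvalue().decode("utf-8"))  # two JSON-Lines records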
| 158
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(f'Job {i:>2} is {job[0]} at {job[1]}')
| 158
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config, 'num_encoder_blocks'))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip('SegFormer does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
__UpperCamelCase = outputs.attentions
__UpperCamelCase = sum(self.model_tester.depths )
self.assertEqual(len(__A ) , __A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCamelCase = True
__UpperCamelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
__UpperCamelCase = outputs.attentions
self.assertEqual(len(__A ) , __A )
# verify the first attentions (first block, first layer)
__UpperCamelCase = (self.model_tester.image_size // 4) ** 2
__UpperCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__UpperCamelCase = (self.model_tester.image_size // 32) ** 2
__UpperCamelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__UpperCamelCase = len(__A )
# Check attention is always last and order is fine
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
__UpperCamelCase = outputs.attentions
self.assertEqual(len(__A ) , __A )
# verify the first attentions (first block, first layer)
__UpperCamelCase = (self.model_tester.image_size // 4) ** 2
__UpperCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(__A ) , __A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
check_hidden_states_output(__A , __A , __A )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ):
continue
__UpperCamelCase = model_class(__A )
model.to(__A )
model.train()
__UpperCamelCase = self._prepare_for_class(__A , __A , return_labels=__A )
__UpperCamelCase = model(**__A ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = SegformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
__UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
__A )
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__A , return_tensors='pt' )
__UpperCamelCase = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
__UpperCamelCase = model(__A )
__UpperCamelCase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __A )
__UpperCamelCase = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __A , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
__UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(__A )
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__A , return_tensors='pt' )
__UpperCamelCase = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
__UpperCamelCase = model(__A )
__UpperCamelCase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __A )
__UpperCamelCase = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __A , atol=1E-1 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__A , align=__A , do_random_crop=__A )
__UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
__A )
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__A , return_tensors='pt' )
__UpperCamelCase = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
__UpperCamelCase = model(__A )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __A )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__A )
__UpperCamelCase = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , __A )
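# Shape sanity note for the attention checks earlier in this file (pure
# arithmetic, no model needed): SegFormer's efficient attention reduces the
# key/value sequence by each stage's sr_ratio. With the tester's image_size=64
# and sr_ratios=[8, 4, 2, 1], the first block attends from
# (64 // 4) ** 2 = 256 query positions to (64 // (4 * 8)) ** 2 = 4 key
# positions, which is exactly what the attention-output assertions verify.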
| 715
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __UpperCAmelCase )
self.assertIsInstance(encoding.boxes , __UpperCAmelCase )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __UpperCAmelCase )
self.assertListEqual(encoding.boxes , __UpperCAmelCase )
# with apply_OCR = False
__UpperCamelCase = LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase )
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
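# Note on the OCR behaviour exercised above: with apply_ocr=True the processor
# returns `words` and `boxes` alongside `pixel_values` (the hard-coded lists
# were produced with Tesseract 4.1.1, so other Tesseract versions can yield
# slightly different tokens/boxes), while apply_ocr=False returns pixel values only.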
| 293
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
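# The block above implements the lazy-import pattern used across transformers:
# importing the package only registers names, and the heavy torch/TF modules
# load on first attribute access. A minimal self-contained sketch of the idea
# (illustrative only, not the actual _LazyModule implementation):
#
#     import importlib
#     import types
#
#     class MiniLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._name_to_submodule = {
#                 attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             if attr not in self._name_to_submodule:
#                 raise AttributeError(f"module {self.__name__} has no attribute {attr}")
#             submodule = importlib.import_module(f"{self.__name__}.{self._name_to_submodule[attr]}")
#             return getattr(submodule, attr)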
| 56
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
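# Usage note (hedged): pytest auto-discovers the two hook functions above from
# conftest.py, so no registration is needed. With the flag wired in by
# `pytest_addoption_shared`, a run such as
#
#     pytest tests/ --make-reports=my_run
#
# makes `pytest_terminal_summary` hand the report id to
# `pytest_terminal_summary_main`, which writes the per-test report files.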
| 172
| 0
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/feature extractor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) )
    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
lowerCAmelCase__ = evaluate.load('''accuracy''' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and must return a dictionary mapping metric names (strings) to floats.
def compute_metrics(A ):
lowerCAmelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowerCAmelCase__ , references=eval_pred.label_ids )
lowerCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase__ ) , labelaid=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase__ = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowerCAmelCase__ , output_all_columns=lowerCAmelCase__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowerCAmelCase__ , output_all_columns=lowerCAmelCase__ )
# Initialize our trainer
lowerCAmelCase__ = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCAmelCase__ = trainer.evaluate()
trainer.log_metrics('''eval''' , lowerCAmelCase__ )
trainer.save_metrics('''eval''' , lowerCAmelCase__ )
# Write model card and (optionally) push to hub
lowerCAmelCase__ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 721
|
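The train transform above calls a `random_subsample` helper that this fragment never defines. A minimal sketch of what it is assumed to do, namely crop a mono waveform to at most `max_length` seconds at a random offset (the signature is inferred from the call site):
import numpy as np

def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    # Hypothetical helper: crop `wav` to at most `max_length` seconds at a random offset.
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = np.random.randint(0, len(wav) - sample_length + 1)
    return wav[random_offset : random_offset + sample_length]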
'''simple docstring'''
import numpy as np
import qiskit
def _snake_case ( A = 8 , A = None ) -> str:
lowerCAmelCase__ = np.random.default_rng(seed=A )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
lowerCAmelCase__ = 6 * key_len
# Measurement basis for Alice's qubits.
lowerCAmelCase__ = rng.integers(2 , size=A )
# The set of states Alice will prepare.
lowerCAmelCase__ = rng.integers(2 , size=A )
# Measurement basis for Bob's qubits.
lowerCAmelCase__ = rng.integers(2 , size=A )
# Quantum Circuit to simulate BB84
lowerCAmelCase__ = qiskit.QuantumCircuit(A , name='''BB84''' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(A ):
if alice_state[index] == 1:
bbaa_circ.x(A )
if alice_basis[index] == 1:
bbaa_circ.h(A )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(A ):
if bob_basis[index] == 1:
bbaa_circ.h(A )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
lowerCAmelCase__ = qiskit.Aer.get_backend('''aer_simulator''' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
lowerCAmelCase__ = qiskit.execute(A , A , shots=1 , seed_simulator=A )
# Returns the result of measurement.
lowerCAmelCase__ = job.result().get_counts(A ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
lowerCAmelCase__ = ''''''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
A , A , A )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
lowerCAmelCase__ = gen_key[:key_len] if len(A ) >= key_len else gen_key.ljust(A , '''0''' )
return key
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 98
| 0
|
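The sifting step at the end of the BB84 routine above is easy to verify in isolation; a self-contained illustration of keeping only the bits where Alice's and Bob's bases agree:
alice_basis = [0, 1, 1, 0, 1]
bob_basis = [0, 0, 1, 0, 1]
measured = "10110"
# Keep a measured bit only when the two parties used the same basis.
sifted = "".join(bit for a, b, bit in zip(alice_basis, bob_basis, measured) if a == b)
print(sifted)  # "1110": position 1 is discarded because the bases differ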
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
_lowerCamelCase , _lowerCamelCase : str = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
_lowerCamelCase : Dict = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
_lowerCamelCase : str = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCamelCase : str = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 663
|
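The `shlex.quote` call in the launch command above is what keeps multi-word arguments intact on the remote shell; a quick demonstration:
import shlex
args = ["--prompt", "hello world", "--k", "5"]
print(" ".join(shlex.quote(a) for a in args))  # --prompt 'hello world' --k 5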
from functools import lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> set:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(SCREAMING_SNAKE_CASE__ )
if n > 1:
factors.add(SCREAMING_SNAKE_CASE__ )
return factors
@lru_cache
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
return len(unique_prime_factors(SCREAMING_SNAKE_CASE__ ) )
def _a ( SCREAMING_SNAKE_CASE__ : list ) -> bool:
'''simple docstring'''
return len(set(SCREAMING_SNAKE_CASE__ ) ) in (0, 1)
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 2
while True:
# Increment each value of a generated range
SCREAMING_SNAKE_CASE__ : List[str] = [base + i for i in range(SCREAMING_SNAKE_CASE__ )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
SCREAMING_SNAKE_CASE__ : Tuple = [upf_len(SCREAMING_SNAKE_CASE__ ) for x in group]
checker.append(SCREAMING_SNAKE_CASE__ )
# If all numbers in the list are equal, return the group variable.
if equality(SCREAMING_SNAKE_CASE__ ):
return group
# Increment our base variable by 1
base += 1
def _a ( SCREAMING_SNAKE_CASE__ : int = 4 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = run(SCREAMING_SNAKE_CASE__ )
return results[0] if len(SCREAMING_SNAKE_CASE__ ) else None
if __name__ == "__main__":
print(solution())
| 663
| 1
|
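The helpers above are defined under placeholder names while their call sites reference `unique_prime_factors` and `upf_len`; a minimal, self-contained sketch of that distinct-prime-factor count under those assumed names:
def unique_prime_factors(n: int) -> set:
    # Trial division, collecting each distinct prime factor once.
    i, factors = 2, set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors

print(unique_prime_factors(644))       # {2, 7, 23}
print(len(unique_prime_factors(645)))  # 3, since 645 = 3 * 5 * 43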
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case( unittest.TestCase ):
@slow
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_SCREAMING_SNAKE_CASE = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
_SCREAMING_SNAKE_CASE = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
_SCREAMING_SNAKE_CASE = model(A_ , labels=A_ ).loss
_SCREAMING_SNAKE_CASE = -tf.math.reduce_mean(A_ ).numpy()
_SCREAMING_SNAKE_CASE = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 168
|
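The score checked in the test above is just the negated mean per-token loss; the arithmetic in miniature:
import numpy as np
token_losses = np.array([2.1, 3.4, 1.9])  # hypothetical per-token cross-entropy losses
score = -token_losses.mean()
print(round(score, 4))  # -2.4667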
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __snake_case( unittest.TestCase ):
def A ( self , A_ , A_ , A_ ):
'''simple docstring'''
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = None
ops.enable_eager_execution_internal()
_SCREAMING_SNAKE_CASE = tf.config.list_physical_devices('''CPU''' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_SCREAMING_SNAKE_CASE = tf.config.list_logical_devices(device_type='''CPU''' )
_SCREAMING_SNAKE_CASE = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_SCREAMING_SNAKE_CASE = GradientAccumulator()
_SCREAMING_SNAKE_CASE = tf.Variable([4.0, 3.0] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = create_optimizer(5e-5 , 10 , 5 )
_SCREAMING_SNAKE_CASE = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ , A_ ):
with strategy.scope():
_SCREAMING_SNAKE_CASE = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ , A_ ):
_SCREAMING_SNAKE_CASE = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 168
| 1
|
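Outside a test, a gradient accumulator like the one exercised above is typically drained every N micro-batches; a minimal training-loop sketch (the constant gradient stands in for a real `tape.gradient(...)` result):
import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

accumulator = GradientAccumulator()
optimizer, _ = create_optimizer(5e-5, 10, 2)  # (init_lr, num_train_steps, num_warmup_steps)
variable = tf.Variable([0.0, 0.0])
accumulation_steps = 4

for step in range(8):
    gradient = tf.constant([1.0, -1.0])  # stand-in for a real gradient
    accumulator([gradient])
    if (step + 1) % accumulation_steps == 0:
        # Apply the accumulated gradient once per `accumulation_steps` micro-batches.
        optimizer.apply_gradients(zip(accumulator.gradients, [variable]))
        accumulator.reset()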
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : str = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36
|
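The `_LazyModule` at the bottom defers the heavy framework imports until a symbol is first accessed. A minimal sketch of that idea, assuming the same `{submodule: [symbol, ...]}` structure (this is not the library's actual implementation):
import importlib

class LazyModule:
    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        # Import the owning submodule only now, on first attribute access.
        module = importlib.import_module("." + self._symbol_to_module[symbol], self._package)
        return getattr(module, symbol)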
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase : Dict ={
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] =['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] =['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] =[
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] =[
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Any =[
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
_UpperCamelCase : str =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 206
| 0
|
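The repeated try/except blocks above all follow one pattern: probe an optional backend and only export the symbols it enables. The same idea in its plainest form:
try:
    import torch  # noqa: F401 -- optional dependency
    torch_available = True
except ImportError:
    torch_available = False

exported = ["AlbertConfig"]
if torch_available:
    exported.append("AlbertModel")
print(exported)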
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : str = 0
@slow
def A( self):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(lowercase__) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
self.assertIsInstance(lowercase__ , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(lowercase__) , 0)
def A( self):
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def A( self):
__UpperCAmelCase : str = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 2_0)
def A( self):
__UpperCAmelCase : List[str] = AutoConfig.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
# Check that tokenizer_type ≠ model_type
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase__ , config=lowercase__)
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def A( self):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowercase__ , '''vocab.txt'''))
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__ , tokenizer_type='''bert''' , use_fast=lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowercase__ , '''vocab.json'''))
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowercase__ , '''merges.txt'''))
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(lowercase__ , tokenizer_type='''gpt2''' , use_fast=lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
@require_tokenizers
def A( self):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowercase__ , '''vocab.txt'''))
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(lowercase__ , tokenizer_type='''bert''')
self.assertIsInstance(lowercase__ , lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowercase__ , '''vocab.json'''))
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowercase__ , '''merges.txt'''))
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__ , tokenizer_type='''gpt2''')
self.assertIsInstance(lowercase__ , lowercase__)
def A( self):
with pytest.raises(lowercase__):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''')
@require_tokenizers
def A( self):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__UpperCAmelCase : Union[str, Any] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''')
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
if isinstance(lowercase__ , lowercase__):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase__)
else:
self.assertEqual(tokenizer.do_lower_case , lowercase__)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def A( self):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase__ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''')
def A( self):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__UpperCAmelCase : Tuple = TOKENIZER_MAPPING.values()
__UpperCAmelCase : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase__)
@require_tokenizers
def A( self):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowercase__) , lowercase__)
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''') , lowercase__)
@require_tokenizers
def A( self):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=lowercase__)
__UpperCAmelCase : str = '''Hello, world. How are you?'''
__UpperCAmelCase : str = tokenizer.tokenize(lowercase__)
self.assertEqual('''[UNK]''' , tokens[0])
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=lowercase__)
__UpperCAmelCase : List[Any] = tokenizer.tokenize(lowercase__)
self.assertEqual('''[UNK]''' , tokens[0])
@require_tokenizers
def A( self):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''')
self.assertEqual(type(lowercase__) , lowercase__)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , '''[UNK]''')
self.assertEqual(tokenizer.padding_side , '''right''')
self.assertEqual(tokenizer.truncation_side , '''right''')
def A( self):
__UpperCAmelCase : str = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 1_2)
def A( self):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''ctrl''')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase__ , lowercase__)
def A( self):
# Check we can load the tokenizer config of an online model.
__UpperCAmelCase : Union[str, Any] = get_tokenizer_config('''bert-base-cased''')
__UpperCAmelCase : List[Any] = config.pop('''_commit_hash''' , lowercase__)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase__ , {'''do_lower_case''': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__UpperCAmelCase : Optional[Any] = get_tokenizer_config(lowercase__)
self.assertDictEqual(lowercase__ , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Any = get_tokenizer_config(lowercase__)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''')
def A( self):
try:
AutoConfig.register('''custom''' , lowercase__)
AutoTokenizer.register(lowercase__ , slow_tokenizer_class=lowercase__)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase__):
AutoTokenizer.register(lowercase__ , slow_tokenizer_class=lowercase__)
__UpperCAmelCase : Dict = CustomTokenizer.from_pretrained(lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def A( self):
try:
AutoConfig.register('''custom''' , lowercase__)
# Can register in two steps
AutoTokenizer.register(lowercase__ , slow_tokenizer_class=lowercase__)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(lowercase__ , fast_tokenizer_class=lowercase__)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase__ , slow_tokenizer_class=lowercase__ , fast_tokenizer_class=lowercase__)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase__):
AutoTokenizer.register(lowercase__ , fast_tokenizer_class=lowercase__)
            # We pass through a fast BERT tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : List[Any] = BertTokenizerFast.from_pretrained(lowercase__)
bert_tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : str = CustomTokenizerFast.from_pretrained(lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(lowercase__ , use_fast=lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def A( self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase__):
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase__):
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__)
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowercase__ , trust_remote_code=lowercase__)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
__UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase__ , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''')
@require_tokenizers
def A( self):
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[int] = False
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Any = NewTokenizer
_lowerCAmelCase : str = False
try:
AutoConfig.register('''custom''' , lowercase__)
AutoTokenizer.register(lowercase__ , slow_tokenizer_class=lowercase__)
AutoTokenizer.register(lowercase__ , fast_tokenizer_class=lowercase__)
# If remote code is not set, the default is to use local
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertFalse(tokenizer.special_attribute_present)
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertFalse(tokenizer.special_attribute_present)
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertTrue(tokenizer.special_attribute_present)
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def A( self):
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowercase__)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
def A( self):
with self.assertRaisesRegex(
lowercase__ , '''bert-base is not a local folder and is not a valid model identifier'''):
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''bert-base''')
def A( self):
with self.assertRaisesRegex(
lowercase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__ , revision='''aaaaaa''')
def A( self):
# Make sure we have cached the tokenizer.
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
with RequestCounter() as counter:
__UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 675
|
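The registration flow these tests exercise, reduced to its essentials (a sketch; `MyConfig` and `MyTokenizer` are hypothetical stand-ins for real subclasses like the test fixtures' `CustomConfig`/`CustomTokenizer`):
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyTokenizer(PreTrainedTokenizer):
    pass

AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# AutoTokenizer.from_pretrained(...) can now resolve any checkpoint whose config
# declares model_type == "my-model" to MyTokenizer.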
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675
| 1
|
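For comparison, the same OR on non-negative integers with the built-in operator, which the string construction above reproduces bit by bit:
a, b = 25, 32
print(bin(a | b))  # 0b111001 -- 11001 | 100000, matching the zfill/zip construction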
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
lowerCAmelCase__ : str = logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCAmelCase = """imagegpt"""
__UpperCAmelCase = ["""past_key_values"""]
__UpperCAmelCase = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[Any] , snake_case_ : Dict=5_1_2 + 1 , snake_case_ : int=3_2 * 3_2 , snake_case_ : Dict=5_1_2 , snake_case_ : Any=2_4 , snake_case_ : List[Any]=8 , snake_case_ : Tuple=None , snake_case_ : Any="quick_gelu" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : Any=0.1 , snake_case_ : Optional[Any]=1e-5 , snake_case_ : Union[str, Any]=0.0_2 , snake_case_ : str=True , snake_case_ : int=True , snake_case_ : int=False , snake_case_ : int=False , snake_case_ : Union[str, Any]=False , **snake_case_ : Dict , ):
'''simple docstring'''
snake_case__ : Optional[int] = vocab_size
snake_case__ : List[str] = n_positions
snake_case__ : Dict = n_embd
snake_case__ : Tuple = n_layer
snake_case__ : List[Any] = n_head
snake_case__ : Optional[Any] = n_inner
snake_case__ : Union[str, Any] = activation_function
snake_case__ : Optional[int] = resid_pdrop
snake_case__ : Tuple = embd_pdrop
snake_case__ : Dict = attn_pdrop
snake_case__ : List[str] = layer_norm_epsilon
snake_case__ : Any = initializer_range
snake_case__ : int = scale_attn_weights
snake_case__ : int = use_cache
snake_case__ : Any = scale_attn_by_inverse_layer_idx
snake_case__ : Union[str, Any] = reorder_and_upcast_attn
snake_case__ : str = tie_word_embeddings
super().__init__(tie_word_embeddings=snake_case_ , **snake_case_ )
class a ( lowerCamelCase__ ):
"""simple docstring"""
@property
def __magic_name__ ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __magic_name__ ( self : List[Any] , snake_case_ : "FeatureExtractionMixin" , snake_case_ : int = 1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional["TensorType"] = None , snake_case_ : int = 3 , snake_case_ : int = 3_2 , snake_case_ : int = 3_2 , ):
'''simple docstring'''
snake_case__ : Dict = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
snake_case__ : Dict = dict(preprocessor(images=snake_case_ , return_tensors=snake_case_ ) )
return inputs
| 347
|
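The `attribute_map` in the config above aliases the generic config names onto GPT-style ones; a quick checkpoint-free check of that aliasing, constructing a tiny config directly:
from transformers import ImageGPTConfig

config = ImageGPTConfig(n_embd=64, n_layer=2, n_head=2)
# The generic names resolve through attribute_map to n_embd / n_layer.
print(config.hidden_size, config.num_hidden_layers)  # 64 2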
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __snake_case ( _lowerCAmelCase : Optional[int] ) -> Dict:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __snake_case ( _lowerCAmelCase : Tuple ) -> Optional[Any]:
A_ : Optional[Any] = create_tensor(_lowerCAmelCase )
A_ : Dict = gather(_lowerCAmelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __snake_case ( _lowerCAmelCase : List[Any] ) -> Any:
A_ : int = [state.process_index]
A_ : Union[str, Any] = gather_object(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == state.num_processes, f"{gathered_obj}, {len(_lowerCAmelCase )} != {state.num_processes}"
assert gathered_obj == list(range(state.num_processes ) ), f"{gathered_obj} != {list(range(state.num_processes ) )}"
def __snake_case ( _lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
A_ : List[str] = create_tensor(_lowerCAmelCase )
A_ : Optional[Any] = broadcast(_lowerCAmelCase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __snake_case ( _lowerCAmelCase : Dict ) -> str:
    # We pad the tensor with one extra element on the main process so the ranks
    # hold tensors of different lengths and padding is actually exercised
if state.is_main_process:
A_ : Tuple = torch.arange(state.num_processes + 1 ).to(state.device )
else:
A_ : List[str] = torch.arange(state.num_processes ).to(state.device )
A_ : Any = pad_across_processes(_lowerCAmelCase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __snake_case ( _lowerCAmelCase : List[Any] ) -> Tuple:
# For now runs on only two processes
if state.num_processes != 2:
return
A_ : str = create_tensor(_lowerCAmelCase )
A_ : int = reduce(_lowerCAmelCase , "sum" )
A_ : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase ), f"{reduced_tensor} != {truth_tensor}"
def __snake_case ( _lowerCAmelCase : Union[str, Any] ) -> Dict:
# For now runs on only two processes
if state.num_processes != 2:
return
A_ : List[str] = create_tensor(_lowerCAmelCase )
A_ : Tuple = reduce(_lowerCAmelCase , "mean" )
A_ : List[str] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase ), f"{reduced_tensor} != {truth_tensor}"
def __snake_case ( _lowerCAmelCase : List[str] ) -> Dict:
# For xla_spawn (TPUs)
main()
def __snake_case ( ) -> List[str]:
A_ : Tuple = PartialState()
state.print(f"State: {state}" )
state.print("testing gather" )
test_gather(_lowerCAmelCase )
state.print("testing gather_object" )
test_gather_object(_lowerCAmelCase )
state.print("testing broadcast" )
test_broadcast(_lowerCAmelCase )
state.print("testing pad_across_processes" )
test_pad_across_processes(_lowerCAmelCase )
state.print("testing reduce_sum" )
test_reduce_sum(_lowerCAmelCase )
state.print("testing reduce_mean" )
test_reduce_mean(_lowerCAmelCase )
if __name__ == "__main__":
main()
| 454
| 0
|
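The `gather` assertion above is easiest to see single-process; simulating the per-rank `create_tensor` values for two ranks and concatenating reproduces exactly `range(1, num_processes**2 + 1)`:
import torch

num_processes = 2
per_rank = [torch.arange(num_processes) + 1.0 + num_processes * rank for rank in range(num_processes)]
gathered = torch.cat(per_rank)  # what gather() would hand every rank
print(gathered.tolist())  # [1.0, 2.0, 3.0, 4.0]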
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=2 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=36 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=6 , _lowerCamelCase=6 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , _lowerCamelCase=1000 , ) -> Tuple:
A_ : Dict = parent
A_ : Tuple = batch_size
A_ : str = num_channels
A_ : List[Any] = image_size
A_ : Any = patch_size
A_ : Dict = text_seq_length
A_ : Optional[Any] = is_training
A_ : Optional[int] = use_input_mask
A_ : List[Any] = use_token_type_ids
A_ : str = use_labels
A_ : int = vocab_size
A_ : Dict = hidden_size
A_ : Any = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : Tuple = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Dict = type_sequence_label_size
A_ : List[Any] = initializer_range
A_ : List[Any] = coordinate_size
A_ : str = shape_size
A_ : List[Any] = num_labels
A_ : int = num_choices
A_ : str = scope
A_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
A_ : Union[str, Any] = text_seq_length
A_ : Union[str, Any] = (image_size // patch_size) ** 2 + 1
A_ : Union[str, Any] = self.text_seq_length + self.image_seq_length
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
A_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ : Dict = bbox[i, j, 3]
A_ : Any = bbox[i, j, 1]
A_ : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ : List[str] = bbox[i, j, 2]
A_ : int = bbox[i, j, 0]
A_ : Optional[int] = t
A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Any = None
if self.use_input_mask:
A_ : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
A_ : Optional[Any] = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
A_ : Any = None
A_ : List[Any] = None
if self.use_labels:
A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : int = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
A_ : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Optional[int] = LayoutLMvaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# text + image
A_ : Any = model(_lowerCamelCase , pixel_values=_lowerCamelCase )
A_ : List[Any] = model(
_lowerCamelCase , bbox=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : Dict = model(_lowerCamelCase , bbox=_lowerCamelCase , pixel_values=_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , bbox=_lowerCamelCase , pixel_values=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
A_ : str = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
A_ : Dict = model(pixel_values=_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.num_labels
A_ : Union[str, Any] = LayoutLMvaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[Any] = model(
_lowerCamelCase , bbox=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
A_ : Tuple = self.num_labels
A_ : Union[str, Any] = LayoutLMvaForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(
_lowerCamelCase , bbox=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : Tuple = LayoutLMvaForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Any = model(
_lowerCamelCase , bbox=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : int = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ = config_and_inputs
A_ : Tuple = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = LayoutLMvaModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Tuple:
A_ : Dict = copy.deepcopy(_lowerCamelCase )
if model_class in get_values(_lowerCamelCase ):
A_ : Optional[int] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_lowerCamelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowerCamelCase ):
A_ : Any = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
elif model_class in get_values(_lowerCamelCase ):
A_ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
A_ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
elif model_class in [
*get_values(_lowerCamelCase ),
]:
A_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
elif model_class in [
*get_values(_lowerCamelCase ),
]:
A_ : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_lowerCamelCase , )
return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Dict:
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Any = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Any:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = LayoutLMvaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Any:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) if is_vision_available() else None
@slow
    def test_inference_no_head( self ) -> Union[str, Any]:
        model = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the last hidden states
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
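# Shape sanity sketch (illustrative, not part of the original test): the 199
# positions decompose as the 2 text tokens from the (1, 2)-shaped input_ids
# plus 1 visual [CLS] token plus a 14 x 14 = 196 patch grid from a 224 x 224
# image with 16 x 16 patches.
assert 2 + 1 + (224 // 16) ** 2 == 199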
| 385
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
"""simple docstring"""
    def __init__( self , rows ) -> None:
        error = TypeError(
            """Matrices must be formed from a list of zero or more lists containing at """
            """least one and the same number of values, each of which must be of type """
            """int or float.""" )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
    def num_rows( self ) -> int:
return len(self.rows )
@property
    def num_columns( self ) -> int:
return len(self.rows[0] )
@property
    def order( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
    def is_square( self ) -> bool:
return self.order[0] == self.order[1]
    def identity( self ) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self ) -> bool:
return bool(self.determinant() )
    def get_minor( self , row , column ) -> int:
        values = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
        return Matrix(values ).determinant()
    def cofactor( self , row , column ) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ) -> Matrix:
return Matrix(
[
                [self.get_minor(row , column ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
    def cofactors( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ) -> Matrix:
        values = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
        return Matrix(values )
    def inverse( self ) -> Matrix:
        determinant = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(_lowerCamelCase ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
    def add_row( self , row , position = None ) -> None:
        type_error = TypeError("""Row must be a list containing all ints and/or floats""" )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                """Row must be equal in length to the other rows in the matrix""" )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column , position = None ) -> None:
        type_error = TypeError(
            """Column must be a list containing all ints and/or floats""" )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                """Column must be equal in length to the other columns in the matrix""" )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other ) -> bool:
        if not isinstance(other , Matrix ):
return NotImplemented
return self.rows == other.rows
    def __ne__( self , other ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
    def __add__( self , other ) -> Matrix:
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self , other ) -> Matrix:
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self , other ) -> Matrix:
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
                    [Matrix.dot_product(row , column ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
    def __pow__( self , other ) -> Matrix:
        if not isinstance(other , int ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls , row , column ) -> int:
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
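    # Quick usage sketch (illustrative, added alongside the doctest entry
    # point): a 2x2 matrix with a non-zero determinant is square and invertable.
    demo = Matrix([[1, 2], [3, 4]] )
    assert demo.determinant() == -2
    assert demo.is_square and demo.is_invertable()
    assert demo.order == (2, 2)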
| 385
| 1
|
from __future__ import annotations
def merge( input_list , low , mid , high ) -> list:
    result = []
    left , right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort( input_list ) -> list:
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            low = i
            input_list = merge(input_list , 0 , low , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
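
# Worked check (illustrative): the merge width p doubles each pass (2, 4, 8),
# so an eight-element list is fully ordered after three bottom-up passes.
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7, 0] ) == [0, 1, 2, 5, 7, 7, 8, 9]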
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 100
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 1_0
    def get_scheduler_config( self , **kwargs )-> str:
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self )-> Union[str, Any]:
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self )-> int:
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self )-> str:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self )-> Any:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction( self )-> Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
    def test_full_loop_no_noise( self )-> Dict:
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
    def test_full_loop_device( self )-> Optional[int]:
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if str(_lowercase ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 628
| 0
|
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number( number ):
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True
CHAINS[57] = False
def chain( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number = 10000000 ):
    for i in range(1, number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
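
# Sanity sketch (illustrative, using the chain from the problem statement):
# 44 -> 32 -> 13 -> 10 -> 1 ends at 1, while 85 -> 89 falls into the 89 loop.
assert next_number(44 ) == 32
assert next_number(85 ) == 89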
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
| 406
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class GetTestInfoTester( unittest.TestCase ):
"""simple docstring"""
    def test_get_test_to_tester_mapping( self ) -> Tuple:
        """simple docstring"""
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {'BertModelTest': 'BertModelTester'}
        expected_blip_mapping = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , expected_blip_mapping )
    def test_get_model_to_test_mapping( self ) -> Tuple:
        """simple docstring"""
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        expected_blip_mapping = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , expected_blip_mapping )
    def test_get_model_to_tester_mapping( self ) -> Dict:
        """simple docstring"""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        expected_blip_mapping = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , expected_blip_mapping )
| 406
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__a = logging.get_logger(__name__)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig( BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = '''nat'''
    attribute_map = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self ,patch_size=4 ,num_channels=3 ,embed_dim=64 ,depths=[3, 4, 6, 5] ,num_heads=[2, 4, 8, 16] ,kernel_size=7 ,mlp_ratio=3.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,layer_scale_init_value=0.0 ,out_features=None ,out_indices=None ,**kwargs ,) -> Optional[int]:
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 ,len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
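
# Worked example (illustrative): with the defaults above (embed_dim=64 and four
# stages), the channel dimension after the last stage is 64 * 2 ** (4 - 1) = 512.
assert int(64 * 2 ** (len([3, 4, 6, 5] ) - 1) ) == 512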
| 30
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    """simple docstring"""
    def __call__( self ,questions ,titles = None ,texts = None ,padding = False ,truncation = False ,max_length = None ,return_tensors = None ,return_attention_mask = None ,**kwargs ,) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions ,text_pair ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
        titles = titles if not isinstance(titles ,str ) else [titles]
        texts = texts if not isinstance(texts ,str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions ,str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f'''There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.''' )
        encoded_question_and_titles = super().__call__(questions ,titles ,padding=False ,truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts ,add_special_tokens=False ,padding=False ,truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles ,encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs['''input_ids''']:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs ,padding=padding ,max_length=max_length ,return_tensors=return_tensors )
    def decode_best_spans( self ,reader_input ,reader_output ,num_spans = 16 ,max_answer_length = 64 ,num_spans_per_passage = 4 ,) -> List[DPRSpanPrediction]:
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) ,reverse=True ,key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id ,2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=max_answer_length ,top_spans=num_spans_per_passage ,)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=doc_id ,start_index=start_index ,end_index=end_index ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self ,start_logits ,end_logits ,max_answer_length ,top_spans ,) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores ,key=lambda x : x[1] ,reverse=True )
        chosen_span_intervals = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
            length = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
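
# Layout sketch (illustrative; the token ids below are made up): each reader
# row is [CLS] question [SEP] title [SEP] text, and the attention mask built in
# __call__ is simply 1 for every non-pad position.
_pad_id = 0  # hypothetical pad token id, for illustration only
_row = [101, 7, 8, 102, 21, 102, 33, 34, _pad_id, _pad_id]
assert [int(tok != _pad_id ) for tok in _row] == [1] * 8 + [0] * 2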
| 30
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
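
# Minimal sketch of the split above (illustrative): a fused qkv weight of shape
# (3 * hidden, hidden) is cut into three equal row blocks, in q, k, v order.
_h = 4  # toy hidden size, for illustration only
_qkv = torch.arange(3 * _h * _h ).reshape(3 * _h , _h )
assert _qkv[:_h].shape == _qkv[_h : 2 * _h].shape == _qkv[-_h:].shape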
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( model_name , pytorch_dump_folder_path , base_model=True ):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 10_00
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('''facebookresearch/dino:main''' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
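    # Example invocation (illustrative; the script name and output path are
    # placeholders, not taken from this file):
    #   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
    #       --pytorch_dump_folder_path ./dino_vitb16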
| 408
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> str:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , use_stable_embedding=__SCREAMING_SNAKE_CASE , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        """simple docstring"""
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Tuple:
        """simple docstring"""
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> List[Any]:
        """simple docstring"""
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Any:
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ) -> int:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp( self ) -> str:
        """simple docstring"""
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config( self ) -> int:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model( self ) -> Dict:
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self ) -> Any:
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self ) -> str:
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
    def test_save_load_fast_init_from_base( self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
snake_case__, snake_case__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] =ids_tensor([1, 10] , config.vocab_size )
snake_case__ : str =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Tuple =OpenLlamaModel(__SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
original_model.eval()
snake_case__ : Tuple =original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
snake_case__ : Dict =original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Dict ={'''type''': scaling_type, '''factor''': 10.0}
snake_case__ : Union[str, Any] =OpenLlamaModel(__SCREAMING_SNAKE_CASE )
scaled_model.to(__SCREAMING_SNAKE_CASE )
scaled_model.eval()
snake_case__ : Tuple =scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
snake_case__ : List[Any] =scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
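
# --- Added illustration: a minimal sketch of the two RoPE scaling strategies the
# parameterized test above exercises (assumed behaviour, not the transformers
# implementation; all names below are illustrative). "linear" compresses the
# position indices by the factor, which perturbs even short inputs; "dynamic"
# NTK-style scaling only enlarges the rotary base once the input grows past the
# original maximum, so short inputs are left unchanged.
def rope_angles(seq_len, dim, max_position_embeddings, scaling_type, factor, base=10000.0):
    positions = torch.arange(seq_len).float()
    if scaling_type == "linear":
        positions = positions / factor
    elif scaling_type == "dynamic" and seq_len > max_position_embeddings:
        base = base * ((factor * seq_len / max_position_embeddings) - (factor - 1)) ** (dim / (dim - 2))
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions, inv_freq)  # angles later fed to sin/cos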
| 408
| 1
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 622
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
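
# --- Added illustration: an assumed sketch (not the datasets implementation) of how
# an offline simulation like `offline(...)` can be built, by patching
# requests.Session.request so every outgoing call fails immediately.
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def simulated_connection_failure():
    def raise_connection_error(self, method, url, **kwargs):
        raise requests.exceptions.ConnectionError(f"Offline mode is enabled: {method} {url}")

    with patch("requests.Session.request", raise_connection_error):
        yield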
| 622
| 1
|
'''simple docstring'''
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    # A pangram uses every letter of the alphabet at least once;
    # collect the distinct letters that occur.
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 610
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
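
# Note (added): swapping sys.modules[__name__] for the _LazyModule proxy keeps
# `import transformers.models.lilt` cheap; the torch-backed classes listed in
# _import_structure are only imported on first attribute access.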
| 610
| 1
|
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
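
# --- Added illustration: a constant-memory alternative to the visited-list check
# above (a sketch, not part of the original module). Floyd's tortoise-and-hare
# advances two pointers at different speeds; they can only meet again if the
# list contains a cycle.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:
            return True
    return False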
| 401
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 401
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
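
# Example (added for illustration): the attribute_map above lets generic code read
# decoder-specific fields under common names, e.g.
#   config = Speech2Text2Config(decoder_attention_heads=8)
#   assert config.num_attention_heads == 8 and config.hidden_size == config.d_model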
| 471
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 471
| 1
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image: str):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
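
# --- Added illustration: the per-intensity mapping built in `stretch` is classic
# histogram equalisation. A vectorised numpy sketch of (approximately) the same
# transform, assuming an 8-bit greyscale image:
def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / img.size  # cumulative distribution of grey levels
    lut = np.rint((levels - 1) * cdf).astype(np.uint8)  # one output level per input level
    return lut[img]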
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 128
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
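
# Worked check (added): for the demo below, f(x) = x**3 + x**2 is negative on
# (-5, -1) and non-negative on (-1, 5). With F(x) = x**4 / 4 + x**3 / 3, the exact
# unsigned area is |F(-1) - F(-5)| + (F(5) - F(-1)) = 344/3 + 594/3 = 938/3 ~ 312.67,
# which the trapezoidal estimates converge to as `steps` grows.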
if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 128
| 1
|
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
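
# --- Added illustration: a Proth number has the form k * 2**n + 1 with odd k and
# k < 2**n, so the generator above can be cross-checked brute force (a sketch):
def is_proth(candidate: int) -> bool:
    n = 1
    while (1 << n) < candidate:
        k, rem = divmod(candidate - 1, 1 << n)
        if rem == 0 and k % 2 == 1 and k < (1 << n):
            return True
        n += 1
    return False


# e.g. all(is_proth(proth(i)) for i in range(1, 11)) holds.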
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F'ValueError: there is no {number}th Proth number')
continue
print(F'The {number}th Proth number: {value}')
| 707
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)"
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute("/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"]),
                    APIRoute("/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"]),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually returns corresponding tokens ids.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided tokens ids to readable text.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        Forward the inputs through the pipeline.
        """
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
| 186
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 561
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__magic_name__ : Optional[int] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_A , cache_dir=_A )
__magic_name__ : Any = [t[-1] for t in os.walk(os.path.join(_A , os.listdir(_A )[0] , 'snapshots' ) )]
__magic_name__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) -> List[Any]:
__magic_name__ , __magic_name__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_A )
__magic_name__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__magic_name__ : List[Any] = jax.random.PRNGKey(0 )
__magic_name__ : Optional[Any] = 4
__magic_name__ : str = jax.device_count()
__magic_name__ : Any = num_samples * [prompt]
__magic_name__ : Union[str, Any] = pipeline.prepare_inputs(_A )
# shard inputs and rng
__magic_name__ : Optional[Any] = replicate(_A )
__magic_name__ : Dict = jax.random.split(_A , _A )
__magic_name__ : Union[str, Any] = shard(_A )
__magic_name__ : Union[str, Any] = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(_A , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
__magic_name__ : Optional[int] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_A ) == num_samples
def __lowerCAmelCase ( self : str ) -> Optional[int]:
__magic_name__ , __magic_name__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_A )
__magic_name__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__magic_name__ : int = jax.random.PRNGKey(0 )
__magic_name__ : Tuple = 50
__magic_name__ : Any = jax.device_count()
__magic_name__ : Dict = num_samples * [prompt]
__magic_name__ : List[Any] = pipeline.prepare_inputs(_A )
# shard inputs and rng
__magic_name__ : str = replicate(_A )
__magic_name__ : Any = jax.random.split(_A , _A )
__magic_name__ : Optional[Any] = shard(_A )
__magic_name__ : Optional[Any] = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
__magic_name__ , __magic_name__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_A )
__magic_name__ : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__magic_name__ : List[str] = jax.random.PRNGKey(0 )
__magic_name__ : int = 50
__magic_name__ : Optional[Any] = jax.device_count()
__magic_name__ : int = num_samples * [prompt]
__magic_name__ : int = pipeline.prepare_inputs(_A )
# shard inputs and rng
__magic_name__ : int = replicate(_A )
__magic_name__ : List[Any] = jax.random.split(_A , _A )
__magic_name__ : Optional[int] = shard(_A )
__magic_name__ : Union[str, Any] = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ , __magic_name__ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
__magic_name__ : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__magic_name__ : int = jax.random.PRNGKey(0 )
__magic_name__ : Tuple = 50
__magic_name__ : int = jax.device_count()
__magic_name__ : Union[str, Any] = num_samples * [prompt]
__magic_name__ : Dict = pipeline.prepare_inputs(_A )
# shard inputs and rng
__magic_name__ : List[str] = replicate(_A )
__magic_name__ : str = jax.random.split(_A , _A )
__magic_name__ : Tuple = shard(_A )
__magic_name__ : Optional[Any] = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def __lowerCAmelCase ( self : str ) -> Tuple:
__magic_name__ : List[str] = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_A , steps_offset=1 , )
__magic_name__ , __magic_name__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_A , safety_checker=_A , )
__magic_name__ : Tuple = scheduler.create_state()
__magic_name__ : Optional[Any] = scheduler_state
__magic_name__ : str = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__magic_name__ : Dict = jax.random.PRNGKey(0 )
__magic_name__ : List[Any] = 50
__magic_name__ : Any = jax.device_count()
__magic_name__ : List[str] = num_samples * [prompt]
__magic_name__ : List[Any] = pipeline.prepare_inputs(_A )
# shard inputs and rng
__magic_name__ : Union[str, Any] = replicate(_A )
__magic_name__ : Tuple = jax.random.split(_A , _A )
__magic_name__ : str = shard(_A )
__magic_name__ : int = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
__magic_name__ : Optional[int] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__magic_name__ : Union[str, Any] = jax.device_count()
__magic_name__ : Optional[Any] = num_samples * [prompt]
__magic_name__ : Dict = jax.random.split(jax.random.PRNGKey(0 ) , _A )
__magic_name__ , __magic_name__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_A , )
__magic_name__ : Union[str, Any] = replicate(_A )
__magic_name__ : Tuple = pipeline.prepare_inputs(_A )
__magic_name__ : Union[str, Any] = shard(_A )
__magic_name__ : str = pipeline(_A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__magic_name__ : Optional[int] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__magic_name__ , __magic_name__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_A , use_memory_efficient_attention=_A , )
__magic_name__ : List[Any] = replicate(_A )
__magic_name__ : Optional[int] = pipeline.prepare_inputs(_A )
__magic_name__ : Tuple = shard(_A )
__magic_name__ : Dict = pipeline(_A , _A , _A , jit=_A ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__magic_name__ : Dict = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 561
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
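
# Illustration (added; hypothetical ids): with cls=0 and sep=2, the pair layout
# produced by build_inputs_with_special_tokens above is the RoBERTa-style
# `<s> A </s></s> B </s>`, e.g. [0] + [10, 11] + [2, 2] + [12] + [2].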
| 721
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
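
# Worked example (added; not part of the original module): the unique x in [0, 35)
# with x = 1 (mod 5) and x = 3 (mod 7) is 31, and both implementations agree:
# chinese_remainder_theorem(5, 1, 7, 3) == chinese_remainder_theorem2(5, 1, 7, 3) == 31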
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 522
| 0
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ ,nn.Linear ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : List[Any] = [*signature.parameters.keys()]
snake_case : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : List[str] = SwiftFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Any = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
snake_case : Dict = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = outputs.hidden_states
snake_case : List[Any] = 8
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertEqual(
hidden_states[i].shape ,torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) ,)
snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
def _config_zero_init(SCREAMING_SNAKE_CASE_ ):
snake_case : int = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,1E-10 )
if isinstance(getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ):
snake_case : List[Any] = _config_zero_init(getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
setattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return configs_no_init
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
snake_case : Optional[Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def lowercase ( ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = self.default_image_processor
snake_case : Optional[int] = prepare_img()
snake_case : Any = image_processor(images=SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
snake_case : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
snake_case : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE_ )
snake_case : str = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ) )
| 36
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it; not used during conversion
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
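
# Example invocation (script name, checkpoint and output paths are illustrative):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa
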
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # build a small vocab from the fixture sentencepiece model and save it where
        # the tokenizer expects its vocab and spm files
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A : str ={'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `A` above holds the expected encoding for the pinned checkpoint revision below
        self.tokenizer_integration_test_util(
            expected_encoding=A,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
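
# Minimal usage sketch of the target-language handling exercised above (the checkpoint
# is the same multilingual model used in the tests; the ids shown are placeholders):
#   tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#   tokenizer.tgt_lang = "fr"                         # selects the French language code
#   ids = tokenizer("C'est trop cool").input_ids      # [FR_CODE, ..., tokenizer.eos_token_id]
#   text = tokenizer.decode(ids, skip_special_tokens=True)  # language code and eos stripped
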
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config

def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser

def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")

def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
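
# Example invocations (config path illustrative): running `accelerate config` answers
# the prompts interactively and saves to the default cache location; an explicit
# destination can be given with:
#   accelerate config --config_file ./my_default_config.yaml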