| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
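The `_LazyModule` indirection above defers the heavy torch-backed imports until a symbol is first accessed. A minimal sketch of the effect, assuming a `transformers` build that ships NLLB-MoE:

```python
# Importing the config class is what finally triggers the real module load.
from transformers import NllbMoeConfig

config = NllbMoeConfig()
print(config.model_type)  # the NLLB-MoE model identifier
```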
[code_codestyle: 694]
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
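A quick sanity check against the module-level graphs defined above (`graph_fwd` holds forward edges, `graph_bwd` the reversed ones): the cheapest route from `E` to `F` is E -> G -> F at cost 2 + 1 = 3, beating E -> B -> C -> D -> F at cost 4, so the two frontiers should meet on 3.

```python
# Minimal check; relies only on bidirectional_dij and the graphs in this file.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3
```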
[style_context_codestyle: 694 | label: 1]

---
def gcd(a: int, b: int) -> int:
    '''simple docstring'''
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    '''simple docstring'''
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
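Two small worked examples, assuming the repaired names above: `gcd` strips the common factors of 24 and 40 down to 8, and since 7 * 15 = 105 = 4 * 26 + 1, 15 is the inverse of 7 modulo 26 (the classic affine-cipher modulus).

```python
assert gcd(24, 40) == 8
assert find_mod_inverse(7, 26) == 15  # 7 * 15 = 105 = 4 * 26 + 1
```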
[code_codestyle: 717]
import argparse
import os
import re

PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
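The sort key for each block is the first quoted identifier that `_re_identifier` captures; a tiny illustration of what it extracts from a typical mapping line (the line itself is a hypothetical example of the format found in the auto modules):

```python
line = '        ("albert", "AlbertConfig"),'
print(_re_identifier.search(line).groups()[0])  # albert
```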
[style_context_codestyle: 186 | label: 0]

---
from __future__ import annotations
from typing import Any
class Matrix:
    '''simple docstring'''

    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix):
        return self + (-another)

    def __mul__(self, another: int | float | Matrix):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1():
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2():
        """simple docstring"""
        import doctest

        doctest.testmod()

    test1()
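`sherman_morrison` implements the rank-1 update formula (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), which reuses the already-known A^(-1) instead of re-inverting. A small check, assuming the class above: with A = I, multiplying the updated inverse by (I + u v^T) should print (approximately) the identity.

```python
ident = Matrix(3, 3, 0)
for i in range(3):
    ident[i, i] = 1
u = Matrix(3, 1, 0)
u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
v = Matrix(3, 1, 0)
v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
# (I + u v^T) times its Sherman-Morrison inverse should be the identity.
print(ident.sherman_morrison(u, v) * (ident + u * v.transpose()))
```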
[code_codestyle: 62]
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    '''simple docstring'''
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    '''simple docstring'''
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
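Each `triangle` call traces one outline before recursing three times, so a run at depth d draws 1 + 3 + ... + 3^d outlines. The helper below (not part of the original script) is a quick way to estimate drawing time before launching the turtle window:

```python
def outlines_drawn(depth: int) -> int:
    # 1 outline at the top level, 3 below it, 9 below those, ...
    return sum(3**k for k in range(depth + 1))

print(outlines_drawn(4))  # 121
```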
[style_context_codestyle: 558 | label: 0]

---
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000, encoder_layers=12, encoder_ffn_dim=2_048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6_000,
        max_target_positions=1_024, num_conv_layers=2, conv_kernel_sizes=(5, 5),
        conv_channels=1_024, input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
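A short instantiation sketch, assuming the reconstructed class above: the defaults mirror the small LibriSpeech checkpoint, and `attribute_map` lets generic code read `hidden_size` even though the field is stored as `d_model`.

```python
config = Speech2TextConfig()
assert config.d_model == 256
assert config.hidden_size == config.d_model  # resolved through attribute_map
```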
[code_codestyle: 713]
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    """simple docstring"""

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler,
            safety_checker=safety_checker, feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # We use the input text to generate the mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet,
            scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width,
            num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
            negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta,
            generator=generator, latents=latents, output_type=output_type, return_dict=return_dict,
            callback=callback, callback_steps=callback_steps,
        )
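A hedged usage sketch: this class appears to be a diffusers community pipeline, and loading it through `custom_pipeline` roughly as below should work; the CLIPSeg and inpainting checkpoint names are illustrative assumptions, not guarantees.

```python
import PIL.Image
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=model,
    segmentation_processor=processor,
)
init_image = PIL.Image.open("room.png").convert("RGB")  # hypothetical input image
result = pipe(prompt="a red couch", image=init_image, text="the couch").images[0]
```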
[style_context_codestyle: 81 | label: 0]

---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[code_codestyle: 666]
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """simple docstring"""

    def __init__(self, params, data):
        '''simple docstring'''
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        '''simple docstring'''
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        '''simple docstring'''
        return len(self.lengths)

    def check(self):
        '''simple docstring'''
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        '''simple docstring'''
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(s) for s in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        '''simple docstring'''
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        '''simple docstring'''
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        '''simple docstring'''
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        '''simple docstring'''
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
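A minimal wiring sketch, assuming the class above; `params` stands in for the argparse namespace the distillation scripts build (it must expose `max_model_input_size`, `mlm`, `special_tok_ids`, and `is_master`), and `token_id_arrays` for a list of tokenized sequences.

```python
from torch.utils.data import DataLoader

dataset = LmSeqsDataset(params=params, data=token_id_arrays)
loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.batch_sequences)
token_ids, lengths = next(iter(loader))  # shapes: (bs, max_seq_len_), (bs,)
```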
[style_context_codestyle: 582 | label: 0]

---
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        """simple docstring"""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """simple docstring"""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """simple docstring"""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """simple docstring"""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        """simple docstring"""
        raise NotImplementedError()
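A minimal sketch of the stochastic sampling loop this scheduler supports (Algorithm 2 of Karras et al., 2022, without the second-order correction); `unet` and its call convention are hypothetical stand-ins for a denoising model, not part of this file.

```python
scheduler = KarrasVeScheduler()
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # 1. raise the noise level, 2. denoise there, 3. Euler step toward sigma_prev.
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
    sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
```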
[code_codestyle: 507]
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None, do_lower_case=False, encoding="utf8",
        unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Any:
"""simple docstring"""
if text is None:
return None
lowerCAmelCase__ : Optional[Any] = self.tokenize(UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = """""", []
for i, ch in enumerate(UpperCamelCase ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase__ : Union[str, Any] = self.SP_CHAR_MAPPING.get(UpperCamelCase )
else:
lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFKC""" , UpperCamelCase )
if self.is_whitespace(UpperCamelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase__ : List[Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase__ : List[str] = token[1:]
lowerCAmelCase__ : Dict = text[offset:].index(UpperCamelCase ) + offset
lowerCAmelCase__ : List[Any] = start + len(UpperCamelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase__ : Optional[int] = end
return token_mapping
@property
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.vocab )
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.__dict__.copy()
lowerCAmelCase__ : Any = None
return state
def __setstate__( self : List[str] , UpperCamelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : str ) -> str:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase , UpperCamelCase ) for c in text) )
def _lowerCAmelCase ( self : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=64 , UpperCamelCase : List[Any]=0.1 ) -> Any:
"""simple docstring"""
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
lowerCAmelCase__ : Union[str, Any] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
lowerCAmelCase__ : Union[str, Any] = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
lowerCAmelCase__ : Union[str, Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
lowerCAmelCase__ : Union[str, Any] = self.sp_model.EncodeAsPieces(UpperCamelCase )
else:
lowerCAmelCase__ : List[str] = self.sp_model.SampleEncodeAsPieces(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = []
for pi, piece in enumerate(UpperCamelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase ) and pi != 0:
new_pieces.append(UpperCamelCase )
continue
else:
continue
lowerCAmelCase__ : List[Any] = 0
for i, chunk in enumerate(UpperCamelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase ) or self.is_punct(UpperCamelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase__ : Dict = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase__ : Any = i
if len(UpperCamelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : str = """""".join(UpperCamelCase ).replace(UpperCamelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.convert_ids_to_tokens(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = """""".join(UpperCamelCase ).replace(UpperCamelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
return self.vocab.get(UpperCamelCase , self.vocab.get(self.unk_token ) )
def _lowerCAmelCase ( self : str , UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return self.reverse_vocab.get(UpperCamelCase , self.unk_token )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Tuple=None ) -> List[str]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : Optional[Any] = [self.cls_token_id]
lowerCAmelCase__ : List[str] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any]=None ) -> Any:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict=None , UpperCamelCase : Any=False ) -> List[Any]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1]
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase ) + 1) + [1] * (len(UpperCamelCase ) + 3)
def _lowerCAmelCase ( self : Any , UpperCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : str ) -> Tuple:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowerCAmelCase ( self : int , UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase ) == 1:
lowerCAmelCase__ : List[Any] = unicodedata.category(UpperCamelCase )
if cat == "Zs":
return True
return False
def _lowerCAmelCase ( self : str , UpperCamelCase : Any ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = {}
with io.open(UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(UpperCamelCase ):
lowerCAmelCase__ : Any = line.rstrip("""\n""" )
lowerCAmelCase__ : Optional[Any] = int(UpperCamelCase )
return token_to_idx
def _lowerCAmelCase ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = 0
if os.path.isdir(UpperCamelCase ):
lowerCAmelCase__ : List[str] = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
lowerCAmelCase__ : List[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
lowerCAmelCase__ : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , """sentencepiece.bpe.model""" )
with open(UpperCamelCase , """wb""" ) as fi:
lowerCAmelCase__ : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (vocab_file,)
[style_context_codestyle: 507 | label: 1]

---
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """simple docstring"""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    """simple docstring"""
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
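A hedged assembly sketch, assuming the names above; `model` stands in for a LightningModule from the seq2seq examples whose validation step logs `val_rouge2`.

```python
import pytorch_lightning as pl

trainer = pl.Trainer(
    callbacks=[
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback(output_dir="out", metric="rouge2"),
        get_early_stopping_callback(metric="rouge2", patience=3),
    ]
)
trainer.fit(model)
```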
[code_codestyle: 262]
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
return len(self.encoder )
def snake_case__ ( self : str ) -> int:
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case__ ( self : List[Any] , __a : Tuple ) -> List[Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase = tuple(__a )
__UpperCAmelCase = get_pairs(__a )
if not pairs:
return token
while True:
__UpperCAmelCase = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase = bigram
__UpperCAmelCase = []
__UpperCAmelCase = 0
while i < len(__a ):
try:
__UpperCAmelCase = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase = tuple(__a )
__UpperCAmelCase = new_word
if len(__a ) == 1:
break
else:
__UpperCAmelCase = get_pairs(__a )
__UpperCAmelCase = ''' '''.join(__a )
__UpperCAmelCase = word
return word
def snake_case__ ( self : int , __a : int ) -> List[Any]:
__UpperCAmelCase = []
for token in re.findall(self.pat , __a ):
__UpperCAmelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(''' ''' ) )
return bpe_tokens
def snake_case__ ( self : Optional[Any] , __a : Tuple ) -> str:
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def snake_case__ ( self : Optional[int] , __a : Any ) -> List[str]:
return self.decoder.get(__a )
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ) -> Union[str, Any]:
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ''' '''.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
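The `bpe` method above greedily fuses the highest-ranked adjacent pair until no known merge remains. A minimal standalone sketch of that loop, with a hypothetical two-rule merge table (not the real Blenderbot vocabulary):

def toy_bpe(word: str, ranks: dict) -> list:
    # Repeatedly merge the best-ranked adjacent symbol pair, as in bpe() above.
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

# toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) -> ["low", "e", "r"]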
| 262
| 1
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
"""simple docstring"""
    def __init__( self : List[str] , parent : Dict , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs( self ):
        input_ids_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_b = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_b, lm_labels)
    def set_seed( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model( self , config , input_ids_a , input_ids_b , lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_a , mems_a = model(input_ids_a ).to_tuple()
        inputs = {"""input_ids""": input_ids_b, """mems""": mems_a}
        hidden_states_b , mems_b = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_b.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_b] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head( self , config , input_ids_a , input_ids_b , lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_a , mems_a = model(input_ids_a ).to_tuple()
        inputs = {"""input_ids""": input_ids_a, """labels""": lm_labels}
        _ , mems_a = model(inputs ).to_tuple()
        lm_logits_b , mems_b = model([input_ids_b, mems_a] ).to_tuple()
        inputs = {"""input_ids""": input_ids_b, """mems""": mems_a, """labels""": lm_labels}
        _ , mems_b = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_b.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_b] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_a , input_ids_b , lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_a, input_ids_b, lm_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids_a}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_head_masking = False
    test_onnx = False
    test_pruning = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp( self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_transfo_xl_model( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
    def test_transfo_xl_lm_head( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
    def test_transfo_xl_sequence_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
    def test_model_from_pretrained( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
    def test_keras_fit( self ):
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
    def test_lm_generate_transfo_xl_wt103( self ):
        model = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
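The shape assertions in the tester above encode Transformer-XL's segment-level memory: each layer caches the last `mem_len` hidden states and feeds them into the next forward pass. A minimal NumPy sketch of that update rule, reusing the tester's dimensions (mem_len=30, batch=13, hidden=32); this illustrates the mechanism only, not the model's actual implementation:

import numpy as np

def update_mems(prev_mem: np.ndarray, hidden: np.ndarray, mem_len: int) -> np.ndarray:
    # New memory = last `mem_len` positions of (old memory ++ current segment).
    cat = np.concatenate([prev_mem, hidden], axis=0)
    return cat[-mem_len:]

mem = np.zeros((30, 13, 32))          # (mem_len, batch_size, hidden_size)
segment = np.random.randn(7, 13, 32)  # (seq_length, batch_size, hidden_size)
assert update_mems(mem, segment, 30).shape == (30, 13, 32)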
| 712
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__a = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right( input_ids , pad_token_id , decoder_start_token_id ) -> jnp.ndarray:
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class FlaxMTaModel ( FlaxTaModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel ( FlaxTaEncoderModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration ( FlaxTaForConditionalGeneration ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
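A quick numeric check of `shift_tokens_right` above, reusing the module's `jnp` import (values are illustrative; `0` plays the decoder start token and `1` the pad id):

ids = jnp.array([[5, 6, 7], [8, -100, -100]])
shifted = shift_tokens_right(ids, pad_token_id=1, decoder_start_token_id=0)
# shifted == [[0, 5, 6], [0, 8, 1]]: tokens move right one slot, the start
# token fills position 0, and any -100 label placeholder becomes the pad id.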
| 301
| 0
|
def temp_input_value ( min_val : int = 10 , max_val : int = 10_00 , option : bool = True ) -> int:
    """simple docstring"""
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
    return min_val if option else max_val
def get_avg ( number_a : int , number_b : int ) -> int:
    """simple docstring"""
    return int((number_a + number_b) / 2 )
def guess_the_number ( lower : int , higher : int , to_guess : int ) -> None:
    """simple docstring"""
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)' )
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value' )
    def answer(number : int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print('started...' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(F'guess the number : {last_numbers[-1]}' )
    print(F'details : {last_numbers!s}' )
def main ( ) -> None:
    """simple docstring"""
    lower = int(input('Enter lower value : ' ).strip() )
    higher = int(input('Enter high value : ' ).strip() )
    guess = int(input('Enter value to guess : ' ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
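Because `guess_the_number` halves the search interval each round, the number of attempts is bounded by about log2(higher - lower). A small check of that bound for a hypothetical range:

import math

# e.g. guess_the_number(-10_000, 10_000, 7) cannot take more than
# ceil(log2(20_000)) + 1 midpoint guesses before converging.
assert math.ceil(math.log2(20_000)) + 1 == 16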
| 105
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 167
| 0
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Tuple = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig ( PretrainedConfig ):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self : Dict , vocab_size=1_00 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=2_49 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=1_28 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0_006 , max_position_embeddings=5_12 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_02_56 , eos_token_id=5_02_56 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
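The `attribute_map` above lets canonical names like `hidden_size` alias model-specific ones like `n_embd`. A self-contained toy sketch of that mechanism (an illustration only, not the transformers implementation, which also supports attribute setting):

class TinyConfig:
    # Canonical attribute name -> model-specific attribute name.
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd: int = 128):
        self.n_embd = n_embd

    def __getattr__(self, name):
        # Only reached for attributes not found normally; resolve aliases.
        mapped = self.attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert TinyConfig().hidden_size == 128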
| 273
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""next_sentence_label"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
        return inputs_dict
class TFMobileBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertForMaskedLM(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertForNextSentencePrediction(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertForPreTraining(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(
            result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertForQuestionAnswering(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFMobileBertModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 3_05_22]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-4.5_919_547, -9.248_295, -9.645_256],
                    [-6.7_306_175, -6.440_284, -6.6_052_837],
                    [-7.2_743_506, -6.7_847_915, -6.024_673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 273
| 1
|
import math
def malus_law ( initial_intensity : float , angle : float ) -> float:
    '''simple docstring'''
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 3_60:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
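Quick sanity values for `malus_law` (I = I0 * cos^2(theta)): full transmission at 0 degrees, a quarter at 60, essentially none at 90.

assert malus_law(100.0, 0) == 100.0
assert abs(malus_law(100.0, 60) - 25.0) < 1e-9
assert malus_law(100.0, 90) < 1e-12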
| 639
|
'''simple docstring'''
from collections.abc import Callable
def bisection (function : Callable[[float], float] , a : float , b : float ) -> float:
    """simple docstring"""
    start : float = a
    end : float = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''' )
    else:
        mid : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f (x : float ) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
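`bisection` only needs a sign change over the interval; a second usage example with a different function:

# x**2 - 4 changes sign on [1, 1000]; the loop narrows the bracket until it
# is tighter than 1e-7, so the root 2.0 comes back to about 7 digits.
root = bisection(lambda x: x**2 - 4, 1, 1000)
assert abs(root - 2.0) < 1e-6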
| 501
| 0
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Step 3 - Flattening
    classifier.add(layers.Flatten())
    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='relu'))
    classifier.add(layers.Dense(units=1, activation='sigmoid'))
    # Compiling the CNN
    classifier.compile(
        optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
    )
    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save('cnn.h5')
    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = 'Normal'
    if result[0][0] == 1:
        prediction = 'Abnormality detected'
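Note that `fit_generator` is deprecated in TensorFlow 2.x; `Model.fit` accepts the same generator objects directly, so an equivalent call over the same hypothetical dataset paths would be:

# classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)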
| 721
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : str = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self ,vocab_file ,spm_file ,src_lang=None ,tgt_lang=None ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,pad_token="<pad>" ,unk_token="<unk>" ,language_codes="m2m100" ,sp_model_kwargs = None ,num_madeup_words=8 ,**kwargs ,) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
        kwargs['''additional_special_tokens'''] = kwargs.get('''additional_special_tokens''' ,[] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang ,tgt_lang=tgt_lang ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,unk_token=unk_token ,pad_token=pad_token ,language_codes=language_codes ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=num_madeup_words ,**kwargs ,)
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file ,self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else '''en'''
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
@property
    def vocab_size( self ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
    def src_lang( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
    def src_lang( self ,new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self ,text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text ,out_type=str )
    def _convert_token_to_id( self ,token ) -> Optional[int]:
        '''simple docstring'''
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token ,self.encoder[self.unk_token] )
    def _convert_id_to_token( self ,index ) -> str:
        '''simple docstring'''
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index ,self.unk_token )
    def convert_tokens_to_string( self ,tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask( self ,token_ids_a ,token_ids_b = None ,already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a ,token_ids_1=token_ids_b ,already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self ,d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file ,self.sp_model_kwargs )
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f'''{save_directory} should be a directory''' )
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder ,vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file ,spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path ,'''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self ,src_texts ,src_lang = "en" ,tgt_texts = None ,tgt_lang = "ro" ,**kwargs ,) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts ,tgt_texts ,**kwargs )
    def _build_translation_inputs( self ,raw_inputs ,src_lang ,tgt_lang ,**extra_kwargs ) -> Optional[int]:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,**extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ) -> Optional[Any]:
        '''simple docstring'''
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> Optional[Any]:
        '''simple docstring'''
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self ,src_lang ) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self ,tgt_lang ) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self ,lang ) -> str:
        '''simple docstring'''
        return self.lang_code_to_token[lang]
    def get_lang_id( self ,lang ) -> int:
        '''simple docstring'''
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json( data : Any , path : str ) -> None:
    '''simple docstring'''
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
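A standalone sketch of the language-token bookkeeping in `M2M100Tokenizer.__init__` above: language codes get ids appended after the base vocabulary, mirroring how `lang_token_to_id` is built (toy sizes, not the real checkpoint vocabulary):

encoder_size = 4                       # hypothetical base vocab size
codes = ["en", "fr", "de"]
lang_token_to_id = {f"__{c}__": encoder_size + i for i, c in enumerate(codes)}
assert lang_token_to_id["__fr__"] == 5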
| 496
| 0
|
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester :
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_initialization( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def __a ( self : List[str] ):
"""simple docstring"""
        def check_hidden_states_output(inputs_dict : Union[str, Any] , config : Tuple , model_class : Optional[int] ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def __a ( self : int ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __a ( self : int ):
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( )-> List[Any]:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
@cached_property
def __a ( self : Optional[Any] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __a ( self : List[Any] ):
"""simple docstring"""
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
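# --- Usage sketch (assumption: not part of the original test file) ---
# The same preprocess -> forward -> logits flow the slow integration test above
# exercises, via the high-level pipeline API. "facebook/regnet-y-040" is assumed
# to be a valid RegNet image-classification checkpoint on the Hub.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification", model="facebook/regnet-y-040")
    print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])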
| 360
|
from math import isqrt
def is_prime( number )-> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution( max_prime = 10**6 )-> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
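# Worked check (assumption: added for illustration, not in the original script).
# prime_candidate walks the differences of consecutive cubes, since each loop
# step adds 6 * cube_index and (n + 1)**3 - n**3 == 3*n*n + 3*n + 1:
def _cube_difference(n: int) -> int:
    return (n + 1) ** 3 - n ** 3
assert [_cube_difference(n) for n in range(1, 6)] == [7, 19, 37, 61, 91]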
if __name__ == "__main__":
print(f"""{solution() = }""")
| 360
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_( state_dict : dict ) -> None:
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk( checkpoint_path : str ) -> MaMaaaForConditionalGeneration:
    mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1_0_2_4 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
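# Example invocation (assumption: the script filename is illustrative; the two
# positional arguments match the parser declared above):
#
#   python convert_m2m100_checkpoint_to_pytorch.py /path/to/model.pt /path/to/output_dir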
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __UpperCAmelCase ( PipelineTool ):
    '''simple docstring'''
    default_checkpoint : Optional[Any] = "facebook/bart-large-mnli"
    description : int = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name : Union[str, Any] = "text_classifier"
    pre_processor_class : Tuple = AutoTokenizer
    model_class : Any = AutoModelForSequenceClassification
    inputs : Union[str, Any] = ["text", ["text"]]
    outputs : Dict = ["text"]
def lowerCamelCase ( self ):
super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('''entail''' ):
                self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
    def lowerCamelCase ( self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
    def lowerCamelCase ( self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
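# Usage sketch (assumption: added for illustration; the call signature mirrors
# the `encode` method above, which takes the text plus a list of candidate labels).
if __name__ == "__main__":
    tool = __UpperCAmelCase()
    print(tool("This movie was fantastic!", ["positive", "negative"]))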
| 599
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __a ( PretrainedConfig ):
    model_type : List[str] = 'open-llama'
    def __init__( self ,vocab_size=10_0000 ,hidden_size=4096 ,intermediate_size=1_1008 ,num_hidden_layers=32 ,num_attention_heads=32 ,hidden_act="silu" ,max_position_embeddings=2048 ,initializer_range=0.02 ,rms_norm_eps=1E-6 ,use_cache=True ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,tie_word_embeddings=False ,use_memory_efficient_attention=True ,hidden_dropout_prob=0.1 ,attention_dropout_prob=0.1 ,use_stable_embedding=True ,shared_input_output_embedding=True ,rope_scaling=None ,**kwargs ,):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            """use_memorry_efficient_attention""" ,use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,tie_word_embeddings=tie_word_embeddings ,**kwargs ,)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling ,dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("""type""" ,None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" ,None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor ,float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
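# Usage sketch (assumption: illustrative values). The validation above accepts a
# two-field dict whose "type" is "linear" or "dynamic" and whose "factor" is a
# float greater than 1.
if __name__ == "__main__":
    config = __a(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)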
| 109
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def __magic_name__ ( coefficient_matrix , constant_matrix , init_val , iterations , ) -> list[float]:
    """simple docstring"""
    rowsa , colsa = coefficient_matrix.shape
    rowsb , colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(msg )
    if colsb != 1:
        msg = f"""Constant matrix must be nx1 but received {rowsb}x{colsb}"""
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            f"""received {rowsa}x{colsa} and {rowsb}x{colsb}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            f"""matrix but received {len(init_val )} and {rowsa}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table ) -> bool:
    """simple docstring"""
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
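    # Worked example (assumption: illustrative numbers, not part of the original).
    # The system below is strictly diagonally dominant, so the sweeps converge.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(__magic_name__(coefficient, constant, [0.0, 0.0, 0.0], 25))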
| 458
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 456
|
def __lowerCAmelCase ( numbers : list[int] ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("""numbers must be an iterable of integers""" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now , max_till_now = max_till_now , min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
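# Usage sketch (assumption: added for illustration).
if __name__ == "__main__":
    print(__lowerCAmelCase([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
    print(__lowerCAmelCase([-2, 0, -1]))    # 0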
| 456
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 10_00
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("""tiny""" ):
            config.hidden_size = 1_92
            config.intermediate_size = 7_68
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("""small""" ):
            config.hidden_size = 3_84
            config.intermediate_size = 15_36
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("""small""" ):
            config.hidden_size = 7_68
            config.intermediate_size = 23_04
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("""base""" ):
            pass
        elif vit_name[4:].startswith("""large""" ):
            config.hidden_size = 10_24
            config.intermediate_size = 40_96
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("""huge""" ):
            config.hidden_size = 12_80
            config.intermediate_size = 51_20
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
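# Example invocation (assumption: the script filename is illustrative):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base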
| 369
|
'''simple docstring'''
import re
def split_input( str_ : str ) -> list:
    return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def to_simple_case( str_ : str ) -> str:
    string_split = split_input(str_ )
    return "".join(
        ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case( text : str , upper : bool , separator : str ) -> str:
    try:
        string_split = split_input(text )
        if upper:
            res_str = """""".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = """""".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case( text : str ) -> str:
    return to_simple_case(text )
def to_camel_case( text : str ) -> str:
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case( text : str , upper : bool ) -> str:
    return to_complex_case(text , upper , """_""" )
def to_kebab_case( text : str , upper : bool ) -> str:
    return to_complex_case(text , upper , """-""" )
if __name__ == "__main__":
__import__("doctest").testmod()
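    # Usage sketch (assumption: illustrative strings, not from the original module).
    print(to_pascal_case("hello world"))              # HelloWorld
    print(to_camel_case("hello world"))               # helloWorld
    print(to_snake_case("hello world", upper=False))  # hello_world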
| 369
| 1
|
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
a_ : Any = '''bert-base-cased'''
a_ : int = '''fp16'''
a_ : Union[str, Any] = '''bf16'''
a_ : Union[str, Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __lowercase( lowercase__ ):
'''simple docstring'''
def snake_case_ ( self ):
super().setUp()
__lowerCamelCase : str = dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def snake_case_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__a ):
__lowerCamelCase : Any = self.dist_env.copy()
__lowerCamelCase : Optional[Any] = f'''{i + 1}'''
__lowerCamelCase : Any = strategy
with mockenv_context(**__a ):
__lowerCamelCase : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def snake_case_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__a ):
__lowerCamelCase : int = self.dist_env.copy()
__lowerCamelCase : Union[str, Any] = prefetch_policy
with mockenv_context(**__a ):
__lowerCamelCase : List[str] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def snake_case_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__a ):
__lowerCamelCase : List[Any] = self.dist_env.copy()
__lowerCamelCase : Optional[Any] = state_dict_type
with mockenv_context(**__a ):
__lowerCamelCase : Optional[int] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def snake_case_ ( self ):
__lowerCamelCase : List[str] = AutoModel.from_pretrained(__a )
for policy in FSDP_AUTO_WRAP_POLICY:
__lowerCamelCase : Optional[int] = self.dist_env.copy()
__lowerCamelCase : Tuple = policy
if policy == "TRANSFORMER_BASED_WRAP":
__lowerCamelCase : List[str] = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
__lowerCamelCase : str = '2000'
with mockenv_context(**__a ):
__lowerCamelCase : Tuple = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__a )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
__lowerCamelCase : Tuple = self.dist_env.copy()
__lowerCamelCase : List[str] = 'TRANSFORMER_BASED_WRAP'
__lowerCamelCase : Optional[Any] = 'T5Layer'
with mockenv_context(**__a ):
__lowerCamelCase : int = FullyShardedDataParallelPlugin()
with self.assertRaises(__a ) as cm:
fsdp_plugin.set_auto_wrap_policy(__a )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
__lowerCamelCase : List[str] = self.dist_env.copy()
__lowerCamelCase : Optional[Any] = 'SIZE_BASED_WRAP'
__lowerCamelCase : int = '0'
with mockenv_context(**__a ):
__lowerCamelCase : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__a )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def snake_case_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
__lowerCamelCase : List[Any] = self.dist_env.copy()
__lowerCamelCase : Any = mp_dtype
with mockenv_context(**__a ):
__lowerCamelCase : List[str] = Accelerator()
if mp_dtype == "fp16":
__lowerCamelCase : Tuple = torch.floataa
elif mp_dtype == "bf16":
__lowerCamelCase : Union[str, Any] = torch.bfloataa
__lowerCamelCase : Union[str, Any] = MixedPrecision(param_dtype=__a , reduce_dtype=__a , buffer_dtype=__a )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __a )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __a ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__a )
def snake_case_ ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
__lowerCamelCase : Any = self.dist_env.copy()
__lowerCamelCase : Any = str(__a ).lower()
with mockenv_context(**__a ):
__lowerCamelCase : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__a ) )
@require_fsdp
@require_multi_gpu
@slow
class __lowercase( lowercase__ ):
'''simple docstring'''
def snake_case_ ( self ):
super().setUp()
__lowerCamelCase : Tuple = 0.82
__lowerCamelCase : Union[str, Any] = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
__lowerCamelCase : int = {
'multi_gpu_fp16': 3200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
__lowerCamelCase : Optional[int] = 160
__lowerCamelCase : Optional[Any] = 160
__lowerCamelCase : int = inspect.getfile(accelerate.test_utils )
__lowerCamelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def snake_case_ ( self ):
__lowerCamelCase : Union[str, Any] = os.path.join(self.test_scripts_folder , 'test_performance.py' )
__lowerCamelCase : Union[str, Any] = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
__lowerCamelCase : Dict = cmd.copy()
for i, strategy in enumerate(__a ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
def snake_case_ ( self ):
__lowerCamelCase : str = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
__lowerCamelCase : int = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(__a ):
__lowerCamelCase : Optional[Any] = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
__lowerCamelCase : Union[str, Any] = len(__a )
for state_dict_type in FSDP_STATE_DICT_TYPE:
__lowerCamelCase : Optional[Any] = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
__lowerCamelCase : Union[str, Any] = cmd_config[:-1]
__lowerCamelCase : Optional[int] = os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
def snake_case_ ( self ):
__lowerCamelCase : int = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
__lowerCamelCase : Tuple = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
__lowerCamelCase : Any = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(__a ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
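# Usage sketch (assumption: illustrative command only; the flags mirror the ones
# the tests above assemble before calling `execute_subprocess_async`):
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --use_fsdp --mixed_precision=fp16 --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer train.py --output_dir ./out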
| 263
|
"""simple docstring"""
import random
def partition( a , left_index , right_index ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j] , a[i] = a[i], a[j]
            i += 1
    a[left_index] , a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random( a , left , right ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot] , a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main( ) -> None:
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    arr = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
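def _demo() -> None:
    # Deterministic demo (assumption: added for illustration; `main` above reads
    # from stdin, so this seeds the RNG and sorts a fixed list instead).
    random.seed(0)
    sample = [3, 1, 4, 1, 5, 9, 2, 6]
    quick_sort_random(sample, 0, len(sample))
    print(sample)  # [1, 1, 2, 3, 4, 5, 6, 9]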
if __name__ == "__main__":
main()
| 263
| 1
|
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = GPTSwaTokenizer
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Any = True
__lowerCamelCase : Any = False
def UpperCamelCase__ ( self: List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase_ =GPTSwaTokenizer(_UpperCAmelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any ):
UpperCamelCase_ ="This is a test"
UpperCamelCase_ ="This is a test"
return input_text, output_text
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ ="<s>"
UpperCamelCase_ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCAmelCase ) , 2000 )
def UpperCamelCase__ ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def UpperCamelCase__ ( self: Dict ):
UpperCamelCase_ =GPTSwaTokenizer(_UpperCAmelCase )
UpperCamelCase_ =tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [465, 287, 265, 631, 842] )
UpperCamelCase_ =tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
_UpperCAmelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
UpperCamelCase_ =tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
UpperCamelCase_ =tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
# fmt: off
self.assertListEqual(
_UpperCAmelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =GPTSwaTokenizer(_UpperCAmelCase )
UpperCamelCase_ =["This is a test", "I was born in 92000, and this is falsé."]
UpperCamelCase_ =[
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_UpperCAmelCase ) , _UpperCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_UpperCAmelCase ) , _UpperCAmelCase )
@slow
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =[
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
UpperCamelCase_ ={"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=_UpperCAmelCase , )
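# Usage sketch (assumption: added for illustration; "AI-Sweden/gpt-sw3-126m" is
# the checkpoint the slow integration test above references).
if __name__ == "__main__":
    tokenizer = GPTSwaTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    print(tokenizer.tokenize("This is a test"))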
| 391
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : List[Any] ) -> str:
"""simple docstring"""
return AutoConfig.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoModel.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : str , **__magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase ( *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase ( *__magic_name__ : Any , **__magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase ( *__magic_name__ : Dict , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*__magic_name__ , **__magic_name__ )
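# Usage sketch (assumption: added for illustration; the entry-point names are
# hypothetical). This file follows the shape of a torch.hub `hubconf.py`, where
# each wrapper simply forwards to the matching `from_pretrained`:
#
#   import torch
#   tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-cased")
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-cased")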
| 15
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_UpperCAmelCase = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[str] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None ) -> int:
"""simple docstring"""
UpperCamelCase_ = None
UpperCamelCase_ = os.path.abspath(os.path.join("examples" , "by_feature" ) )
UpperCamelCase_ = os.path.abspath("examples" )
for item in os.listdir(_SCREAMING_SNAKE_CASE ):
if item not in EXCLUDE_EXAMPLES:
UpperCamelCase_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ) and ".py" in item_path:
with self.subTest(
tested_script=_SCREAMING_SNAKE_CASE , feature_script=_SCREAMING_SNAKE_CASE , tested_section="main()" if parser_only else "training_function()" , ):
UpperCamelCase_ = compare_against_test(
os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = '''\n'''.join(_SCREAMING_SNAKE_CASE )
if special_strings is not None:
for string in special_strings:
UpperCamelCase_ = diff.replace(_SCREAMING_SNAKE_CASE , "" )
self.assertEqual(_SCREAMING_SNAKE_CASE , "" )
def lowercase ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
self.one_complete_example("complete_nlp_example.py" , _SCREAMING_SNAKE_CASE )
self.one_complete_example("complete_nlp_example.py" , _SCREAMING_SNAKE_CASE )
def lowercase ( self: int ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
UpperCamelCase_ = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example("complete_cv_example.py" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.one_complete_example("complete_cv_example.py" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class _UpperCamelCase ( UpperCamelCase_ ):
_UpperCamelCase : Optional[int] = False
@classmethod
def lowercase ( cls: Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().setUpClass()
UpperCamelCase_ = tempfile.mkdtemp()
UpperCamelCase_ = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCamelCase_ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowercase ( cls: List[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowercase ( self: Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ = f'''\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = f'''\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '''.split()
UpperCamelCase_ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def lowercase ( self: int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = f'''\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '''.split()
UpperCamelCase_ = run_command(self._launch_args + testargs , return_stdout=_SCREAMING_SNAKE_CASE )
self.assertNotIn("epoch 0:" , _SCREAMING_SNAKE_CASE )
self.assertIn("epoch 1:" , _SCREAMING_SNAKE_CASE )
def lowercase ( self: str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = f'''\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '''.split()
UpperCamelCase_ = run_command(self._launch_args + testargs , return_stdout=_SCREAMING_SNAKE_CASE )
if torch.cuda.is_available():
UpperCamelCase_ = torch.cuda.device_count()
else:
UpperCamelCase_ = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , _SCREAMING_SNAKE_CASE )
self.assertIn("epoch 1:" , _SCREAMING_SNAKE_CASE )
else:
self.assertIn("epoch 0:" , _SCREAMING_SNAKE_CASE )
self.assertIn("epoch 1:" , _SCREAMING_SNAKE_CASE )
@slow
def lowercase ( self: Tuple ) -> str:
"""simple docstring"""
UpperCamelCase_ = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
UpperCamelCase_ = run_command(self._launch_args + testargs , return_stdout=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = re.findall("({.+})" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [r for r in results if '''accuracy''' in r][-1]
UpperCamelCase_ = ast.literal_eval(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def lowercase ( self: List[str] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase ( self: Dict ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
UpperCamelCase_ = f'''\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , "tracking" ) ) )
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def lowercase ( self: int ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
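# Usage note (assumption: illustrative): each test above shells out through
# `run_command`, effectively running e.g.
#
#   accelerate launch --config_file <tmpdir>/default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir <tmpdir>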
| 719
|
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
@staticmethod
def lowercase ( *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCamelCase_ = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = object_detector(examples[0] , threshold=0.0 )
UpperCamelCase_ = len(_SCREAMING_SNAKE_CASE )
self.assertGreater(_SCREAMING_SNAKE_CASE , 0 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{
"score": ANY(_SCREAMING_SNAKE_CASE ),
"label": ANY(_SCREAMING_SNAKE_CASE ),
"box": {"xmin": ANY(_SCREAMING_SNAKE_CASE ), "ymin": ANY(_SCREAMING_SNAKE_CASE ), "xmax": ANY(_SCREAMING_SNAKE_CASE ), "ymax": ANY(_SCREAMING_SNAKE_CASE )},
}
for i in range(_SCREAMING_SNAKE_CASE )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowercase ( self: Tuple ) -> List[str]:
"""simple docstring"""
pass
@require_torch
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCamelCase_ = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
UpperCamelCase_ = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"score": 0.72_35, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.72_18, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.71_84, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.67_48, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_56, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.66_14, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.64_56, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_42, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.64_19, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def lowercase ( self: List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = pipeline("zero-shot-object-detection" )
UpperCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
UpperCamelCase_ = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.14_74, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.12_08, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.25_37, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.28_68, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_77, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls) -> str:
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available() -> bool:
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available() -> bool:
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available() -> bool:
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
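
# Illustrative usage sketch (not part of the original module): picking the default
# backend and launching a search. Assumes a configured `trainer` (transformers.Trainer)
# exists; the `n_trials`/`direction` values are arbitrary examples.
#
#     backend_name = default_hp_search_backend()          # e.g. "optuna" if installed
#     backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#     backend.ensure_available()
#     best_run = backend.run(trainer, n_trials=10, direction="minimize")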
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
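
# Illustrative usage sketch (an assumption, not from the original file): mixing two
# map-style datasets 70/30 and concatenating two others. Assumes `ds1`/`ds2` are
# `datasets.Dataset` objects created elsewhere, e.g. via `Dataset.from_dict`.
#
#     mixed = interleave_datasets([ds1, ds2], probabilities=[0.7, 0.3], seed=42,
#                                 stopping_strategy="first_exhausted")
#     combined = concatenate_datasets([ds1, ds2])  # rows of ds2 appended after ds1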
'''
Compute the sum of the digits in the numerator of the max_n-th convergent of the
continued fraction for e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] (Project Euler problem 65).
'''


def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of num."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 1_00) -> int:
    """Digit sum of the numerator of the max_n-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(F"""{solution() = }""")
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 10_24
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 15_36
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 20_48
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.')
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f'Missing key(s) in state_dict: {missing_keys}')
    if len(unexpected_keys) > 0:
        raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}')
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 20_48):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 20_48
    model.generation_config.pad_token_id = 20_48
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}')
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f'Pushing model {checkpoint} to {repo_id}')
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
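
# Illustrative invocation (an assumption about typical usage; the script filename
# below is hypothetical and not taken from the original file):
#
#     python convert_musicgen_checkpoint.py --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small-hf
#
# or, from Python:
#
#     convert_musicgen_checkpoint("small", pytorch_dump_folder="./musicgen-small-hf")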
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''<s>'''
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_snake_case ) , 10_54 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def _lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowerCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowerCamelCase = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(_snake_case )
__lowerCamelCase = tokenizer_p.save_pretrained(_snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__lowerCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_snake_case , _snake_case )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(_snake_case )
__lowerCamelCase = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_snake_case )
# Save tokenizer rust, legacy_format=True
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(_snake_case , legacy_format=_snake_case )
__lowerCamelCase = tokenizer_p.save_pretrained(_snake_case )
# Checks it save with the same files
self.assertSequenceEqual(_snake_case , _snake_case )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(_snake_case )
__lowerCamelCase = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
shutil.rmtree(_snake_case )
# Save tokenizer rust, legacy_format=False
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(_snake_case , legacy_format=_snake_case )
__lowerCamelCase = tokenizer_p.save_pretrained(_snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(_snake_case )
__lowerCamelCase = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
shutil.rmtree(_snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = "facebook/mbart-large-50-one-to-many-mmt"
SCREAMING_SNAKE_CASE_ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
SCREAMING_SNAKE_CASE_ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
SCREAMING_SNAKE_CASE_ = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def _lowerCamelCase ( cls ):
"""simple docstring"""
__lowerCamelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
__lowerCamelCase = 1
return cls
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids )
__lowerCamelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__lowerCamelCase = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
__lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
self.assertNotIn(self.tokenizer.eos_token , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , _snake_case )
__lowerCamelCase = 10
__lowerCamelCase = self.tokenizer(_snake_case , max_length=_snake_case , truncation=_snake_case ).input_ids[0]
self.assertEqual(ids[0] , _snake_case )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_snake_case ) , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''' )
__lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(self.src_text , padding=_snake_case , truncation=_snake_case , max_length=3 , return_tensors='''pt''' )
__lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=10 , return_tensors='''pt''' )
__lowerCamelCase = targets['''input_ids''']
__lowerCamelCase = shift_tokens_right(_snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(_snake_case ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"""
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""")
        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"""
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
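
# Illustrative usage sketch (assumed, not from the original file; the concrete task
# and model names below are examples):
#
#     from transformers import pipeline
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     print(translator("How are you?")[0]["translation_text"])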
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
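
# Illustrative usage sketch (an assumption about how PipelineTool subclasses are
# typically invoked, not from the original file):
#
#     tool = TextToSpeechTool()
#     tool.setup()
#     waveform = tool("Hello world")  # runs encode -> forward -> decode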
def abbr(a: str, b: str) -> bool:
    """
    Check whether string ``a`` can be turned into ``b`` by uppercasing some of its
    lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
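
# Worked example (added for illustration): abbr("daBcd", "ABC") succeeds via
# d (drop) -> a (uppercase to A) -> B (match) -> c (uppercase to C) -> d (drop).
# The table entry dp[i][j] means: the first i characters of `a` can produce the
# first j characters of `b`.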
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    '''
    Factory function used to instantiate the command from parsed CLI arguments.
    '''
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f'''Loading model {model_type}''')
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        assert tok('sam').input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text], padding=False, truncation=True)['input_ids']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text)['input_ids']
        encoded_dot = tok(src_text_dot)['input_ids']
        assert encoded[-1] == encoded_dot[0]
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class _snake_case ( a__ ):
snake_case__ = "align_text_model"
def __init__( self : Union[str, Any] , UpperCAmelCase : int=30522 , UpperCAmelCase : Union[str, Any]=768 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : str=3072 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : str=0.0_2 , UpperCAmelCase : List[Any]=1E-12 , UpperCAmelCase : str=0 , UpperCAmelCase : Union[str, Any]="absolute" , UpperCAmelCase : Any=True , **UpperCAmelCase : Optional[int] , ):
super().__init__(**UpperCAmelCase )
__lowerCamelCase : Any = vocab_size
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : Tuple = intermediate_size
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : str = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : Union[str, Any] = type_vocab_size
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : List[Any] = layer_norm_eps
__lowerCamelCase : Tuple = position_embedding_type
__lowerCamelCase : Tuple = use_cache
__lowerCamelCase : Dict = pad_token_id
@classmethod
def lowerCamelCase__ ( cls : Optional[Any] , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : int ):
cls._set_token_in_kwargs(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : Any = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
__lowerCamelCase : List[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _snake_case ( a__ ):
snake_case__ = "align_vision_model"
def __init__( self : str , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 600 , UpperCAmelCase : float = 2.0 , UpperCAmelCase : float = 3.1 , UpperCAmelCase : int = 8 , UpperCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase : List[int] = [] , UpperCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase : float = 0.2_5 , UpperCAmelCase : str = "swish" , UpperCAmelCase : int = 2560 , UpperCAmelCase : str = "mean" , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 0.0_0_1 , UpperCAmelCase : float = 0.9_9 , UpperCAmelCase : float = 0.2 , **UpperCAmelCase : int , ):
super().__init__(**UpperCAmelCase )
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : Optional[int] = image_size
__lowerCamelCase : Union[str, Any] = width_coefficient
__lowerCamelCase : Tuple = depth_coefficient
__lowerCamelCase : Tuple = depth_divisor
__lowerCamelCase : Tuple = kernel_sizes
__lowerCamelCase : Union[str, Any] = in_channels
__lowerCamelCase : Union[str, Any] = out_channels
__lowerCamelCase : Union[str, Any] = depthwise_padding
__lowerCamelCase : int = strides
__lowerCamelCase : Union[str, Any] = num_block_repeats
__lowerCamelCase : List[Any] = expand_ratios
__lowerCamelCase : List[Any] = squeeze_expansion_ratio
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : int = hidden_dim
__lowerCamelCase : List[str] = pooling_type
__lowerCamelCase : Optional[Any] = initializer_range
__lowerCamelCase : Union[str, Any] = batch_norm_eps
__lowerCamelCase : Union[str, Any] = batch_norm_momentum
__lowerCamelCase : str = drop_connect_rate
__lowerCamelCase : Tuple = sum(UpperCAmelCase ) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=640,
                 temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
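# Minimal usage sketch of the composite config above; the construction path only
# uses the API defined in this module, and the printed value is the default.
if __name__ == "__main__":
    text_config = AlignTextConfig()
    vision_config = AlignVisionConfig()
    config = AlignConfig.from_text_vision_configs(text_config, vision_config)
    print(config.projection_dim)  # 640 by default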
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>",
                 sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
                 bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True,
                 strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            bos_token=bos_token, eos_token=eos_token, clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
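# Minimal usage sketch; "funnel-transformer/small" is one of the checkpoints in
# the maps above and is fetched from the Hub (network access required).
if __name__ == "__main__":
    tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    encoded = tokenizer("Hello world", "How are you?")
    print(encoded["token_type_ids"])  # starts with cls_token_type_id == 2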
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers in place using LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
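# A hedged usage sketch of the sort above; the input values are illustrative.
def _radix_sort_demo():
    example = [170, 45, 75, 90, 802, 24, 2, 66]
    assert radix_sort(example) == [2, 24, 45, 66, 75, 90, 170, 802]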
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
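# Hedged usage sketch; "checkpoint.pkl" is a hypothetical path to a pickled
# detectron-style checkpoint with a top-level "model" dict.
def _load_checkpoint_demo(path="checkpoint.pkl"):
    state_dict = load_checkpoint(path)
    print(sorted(state_dict.keys())[:5])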
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d
    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = " "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)
    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file_content = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
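# Minimal sketch of the two URL layouts produced above; the model ids are illustrative.
def _hf_bucket_url_demo():
    print(hf_bucket_url("bert-base-uncased", "config.yaml"))
    # -> https://cdn.huggingface.co/bert-base-uncased-config.yaml (legacy: no "/" in id)
    print(hf_bucket_url("unc-nlp/frcnn-vg-finetuned", "config.yaml"))
    # -> https://cdn.huggingface.co/unc-nlp/frcnn-vg-finetuned/config.yaml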
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading")
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10,
                   resume_download=False, user_agent=None, local_files_only=False):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
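# Minimal sketch; the URL and etag are illustrative, and the output is whatever
# sha256 yields (one hex digest, plus a second one when an etag is given).
def _url_to_filename_demo():
    print(url_to_filename("https://example.com/model.bin"))
    print(url_to_filename("https://example.com/model.bin", etag='"abc123"'))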
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None,
                resume_download=False, user_agent=None, extract_compressed_file=False,
                force_extract=False, local_files_only=False):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
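# Hedged usage sketch; the URL is illustrative and needs network access. The file
# is downloaded once and reused from TRANSFORMERS_CACHE on later calls.
def _cached_path_demo():
    local = cached_path("https://cdn.huggingface.co/unc-nlp/frcnn-vg-finetuned/config.yaml")
    print(local)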
def a_ ( _lowerCAmelCase ,_lowerCAmelCase="," ) -> List[str]:
assert isinstance(_lowerCAmelCase ,_lowerCAmelCase )
if os.path.isfile(_lowerCAmelCase ):
with open(_lowerCAmelCase ) as f:
__lowerCamelCase : Optional[Any] = eval(f.read() )
else:
__lowerCamelCase : Union[str, Any] = requests.get(_lowerCAmelCase )
try:
__lowerCamelCase : Optional[Any] = requests.json()
except Exception:
__lowerCamelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__lowerCamelCase : Dict = eval(_lowerCAmelCase )
except Exception:
__lowerCamelCase : Dict = data.split('\n' )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0,
                 attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False,
                 multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
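# Minimal usage sketch of the config above; the override values are illustrative.
if __name__ == "__main__":
    config = FalconConfig(num_kv_heads=8, new_decoder_architecture=True)
    print(config.head_dim)  # hidden_size // num_attention_heads = 4544 // 71 = 64
    print(config.rotary)    # True while alibi is False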
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
__lowerCamelCase = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> str:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def lowercase ( __UpperCamelCase ) -> Union[str, Any]:
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=__UpperCamelCase )
def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> Dict:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
__magic_name__ = tmp_path_factory.getbasetemp() / '''cache'''
__magic_name__ = test_hf_cache_home / '''datasets'''
__magic_name__ = test_hf_cache_home / '''metrics'''
__magic_name__ = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(__UpperCamelCase ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(__UpperCamelCase ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(__UpperCamelCase ) )
__magic_name__ = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(__UpperCamelCase ) )
__magic_name__ = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__UpperCamelCase ) )
@pytest.fixture(autouse=__UpperCamelCase , scope='''session''' )
def lowercase ( ) -> Any:
datasets.disable_progress_bar()
@pytest.fixture(autouse=__UpperCamelCase )
def lowercase ( __UpperCamelCase ) -> str:
# don't take tests into account when counting downloads
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , __UpperCamelCase )
@pytest.fixture
def lowercase ( __UpperCamelCase ) -> int:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , __UpperCamelCase )
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__snake_case ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__snake_case =importlib.util.spec_from_file_location(
"""transformers""",
os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__snake_case =spec.loader.load_module()
__snake_case =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__snake_case =re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
__snake_case ={
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
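# Minimal sketch of what `_re_checkpoint` extracts; the docstring fragment is illustrative.
def _re_checkpoint_demo():
    example = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    print(_re_checkpoint.findall(example))
    # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]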
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer('''m xxx ɪ''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
SCREAMING_SNAKE_CASE : Dict = tokenizer('''maɪ c''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [3, 200] ) # mai should be <unk> (=3)
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : int = '''Hello how are you'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : List[Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase__ ).input_ids , tokenizer(UpperCamelCase__ , do_phonemize=UpperCamelCase__ ).input_ids )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how are you'''
SCREAMING_SNAKE_CASE : int = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how are you'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase__ ).input_ids , tokenizer(UpperCamelCase__ , do_phonemize=UpperCamelCase__ ).input_ids )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
SCREAMING_SNAKE_CASE : str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
SCREAMING_SNAKE_CASE : int = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(UpperCamelCase__ , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : int = '''Hello how are you'''
SCREAMING_SNAKE_CASE : str = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : int = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Tuple = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(UpperCamelCase__ , phonemizer_lang='''en-us''' ).input_ids
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCamelCase__ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(UpperCamelCase__ , '''ɛ l o h aʊ a ʁ j u''' )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : Dict = '''Hello how Are you'''
SCREAMING_SNAKE_CASE : str = '''hello how are you'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCamelCase__ ).input_ids
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
SCREAMING_SNAKE_CASE : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
SCREAMING_SNAKE_CASE : Any = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ , filter_word_delimiter_token=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase__ ) )
# transform list to ModelOutput
SCREAMING_SNAKE_CASE : Any = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(UpperCamelCase__ : Any , UpperCamelCase__ : Dict ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
[recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for la, la in zip(UpperCamelCase__ , UpperCamelCase__ )]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
SCREAMING_SNAKE_CASE : str = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = [tokenizer.decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ ) for ids in sample_ids]
check_list_tuples_equal(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __A ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __A ( self : Any ):
'''simple docstring'''
pass
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : List[str] = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE : Dict = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE : List[str] = tokenizer.add_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : Tuple = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size + len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.add_special_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size_a + len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
def test_convert_tokens_to_string_format(self):
    tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
            output = tokenizer.convert_tokens_to_string(tokens)

            self.assertIsInstance(output["text"], str)
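# Hedged sketch of the contract the test above pins down: for a phoneme tokenizer,
# convert_tokens_to_string is assumed to join phoneme tokens with spaces and wrap
# the result in a dict (a toy approximation, not the actual implementation):
#     def convert_tokens_to_string(tokens):
#         return {"text": " ".join(tokens)}
#     convert_tokens_to_string(["ð", "ɪ", "s"])  ->  {"text": "ð ɪ s"}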
| 710
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ):
SCREAMING_SNAKE_CASE : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Dict = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE : Optional[Any] = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = get_image_size(_lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Dict = output_height / input_height
SCREAMING_SNAKE_CASE : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : List[Any] = scale_height
SCREAMING_SNAKE_CASE : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase )
return (new_height, new_width)
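# A minimal, self-contained sketch of the snapping rule implemented by
# `constraint_to_multiple_of` above, so its rounding can be checked in isolation.
# The helper name below is hypothetical and not part of this module's API.
def _demo_constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple  # snap to the nearest multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple  # round down if we overshot the cap
    if x < min_val:
        x = math.ceil(val / multiple) * multiple  # round up if we undershot the floor
    return x


# e.g. _demo_constraint_to_multiple_of(383.9, 32) == 384
#      _demo_constraint_to_multiple_of(383.9, 32, max_val=380) == 352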
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
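# Hedged, self-contained sketch of the post-processing step above: a single image's
# logits are upsampled to the requested size, then argmax over the label axis gives
# the segmentation map. The helper name and shapes are illustrative assumptions,
# and this sketch assumes torch is installed.
def _demo_postprocess_one(logits, target_size):
    # logits: torch.FloatTensor of shape (num_labels, height, width)
    resized = torch.nn.functional.interpolate(
        logits.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
    )
    return resized[0].argmax(dim=0)  # torch.LongTensor of shape target_size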
| 34
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCamelCase = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
A__ : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A__ : Union[str, Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A__ : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A__ : Union[str, Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
A__ = ZeroShotClassificationPipeline(
model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , candidate_labels=["polics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
A__ = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(SCREAMING_SNAKE_CASE__ , {"sequence": ANY(SCREAMING_SNAKE_CASE__ ), "labels": [ANY(SCREAMING_SNAKE_CASE__ )], "scores": [ANY(SCREAMING_SNAKE_CASE__ )]} )
# No kwarg
A__ = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , {"sequence": ANY(SCREAMING_SNAKE_CASE__ ), "labels": [ANY(SCREAMING_SNAKE_CASE__ )], "scores": [ANY(SCREAMING_SNAKE_CASE__ )]} )
A__ = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , {"sequence": ANY(SCREAMING_SNAKE_CASE__ ), "labels": [ANY(SCREAMING_SNAKE_CASE__ )], "scores": [ANY(SCREAMING_SNAKE_CASE__ )]} )
A__ = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , {"sequence": ANY(SCREAMING_SNAKE_CASE__ ), "labels": [ANY(SCREAMING_SNAKE_CASE__ ), ANY(SCREAMING_SNAKE_CASE__ )], "scores": [ANY(SCREAMING_SNAKE_CASE__ ), ANY(SCREAMING_SNAKE_CASE__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
A__ = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , {"sequence": ANY(SCREAMING_SNAKE_CASE__ ), "labels": [ANY(SCREAMING_SNAKE_CASE__ ), ANY(SCREAMING_SNAKE_CASE__ )], "scores": [ANY(SCREAMING_SNAKE_CASE__ ), ANY(SCREAMING_SNAKE_CASE__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
A__ = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(SCREAMING_SNAKE_CASE__ , {"sequence": ANY(SCREAMING_SNAKE_CASE__ ), "labels": [ANY(SCREAMING_SNAKE_CASE__ )], "scores": [ANY(SCREAMING_SNAKE_CASE__ )]} )
# https://github.com/huggingface/transformers/issues/13846
A__ = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{"sequence": ANY(SCREAMING_SNAKE_CASE__ ), "labels": [ANY(SCREAMING_SNAKE_CASE__ ), ANY(SCREAMING_SNAKE_CASE__ )], "scores": [ANY(SCREAMING_SNAKE_CASE__ ), ANY(SCREAMING_SNAKE_CASE__ )]}
for i in range(1 )
] , )
A__ = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{"sequence": ANY(SCREAMING_SNAKE_CASE__ ), "labels": [ANY(SCREAMING_SNAKE_CASE__ ), ANY(SCREAMING_SNAKE_CASE__ )], "scores": [ANY(SCREAMING_SNAKE_CASE__ ), ANY(SCREAMING_SNAKE_CASE__ )]}
for i in range(2 )
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
classifier(SCREAMING_SNAKE_CASE__ , candidate_labels="politics" )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=SCREAMING_SNAKE_CASE__ )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=SCREAMING_SNAKE_CASE__ , )
self.run_entailment_id(SCREAMING_SNAKE_CASE__ )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
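    # Hedged sketch of the lookup the assertions above pin down: the pipeline is
    # assumed to scan config.label2id for a label whose lowercased name starts
    # with "entail" and return its id, falling back to -1 when none matches:
    #     for label, ind in config.label2id.items():
    #         if label.lower().startswith("entail"):
    #             return ind
    #     return -1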
@require_torch
def snake_case__ ( self ) -> List[Any]:
A__ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def snake_case__ ( self ) -> Optional[int]:
A__ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
A__ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def snake_case__ ( self ) -> List[Any]:
A__ = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
A__ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def snake_case__ ( self ) -> Union[str, Any]:
A__ = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
A__ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
A__ = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=SCREAMING_SNAKE_CASE__ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def snake_case__ ( self ) -> List[str]:
A__ = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
A__ = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
A__ = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=SCREAMING_SNAKE_CASE__ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
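# Hedged, self-contained sketch of the NLI trick these tests exercise: each candidate
# label is slotted into the hypothesis template and scored against the premise, and
# the per-label entailment scores are normalized. Letting `pipeline` pick its default
# NLI checkpoint here is an assumption, not something the tests require.
def _demo_zero_shot(premise, labels, template="This example is {}."):
    clf = pipeline("zero-shot-classification")
    return clf(premise, candidate_labels=labels, hypothesis_template=template)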
| 104
|
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
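# Hedged usage sketch (an illustrative round trip):
#     base16_encode(b"Hello")      ->  "48656C6C6F"
#     base16_decode("48656C6C6F")  ->  b"Hello"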
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : List[Any] = True
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = XGLMTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(__UpperCamelCase ) , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = XGLMTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
lowercase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__UpperCamelCase , f.name )
lowercase_ = XGLMTokenizer(f.name , keep_accents=__UpperCamelCase )
lowercase_ = pickle.dumps(__UpperCamelCase )
pickle.loads(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(__UpperCamelCase )
lowercase_ = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
lowercase_ = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
lowercase_ = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(__UpperCamelCase )
lowercase_ = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
        # fmt: off
        lowercase_ = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""facebook/xglm-564M""" , padding=__UpperCamelCase , )
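    # Hedged sketch of the lossy round trip the fixture tests above rely on: ids that
    # fall outside the small test vocab decode back to "<unk>", e.g. with a toy mapping
    #     id_to_token = {0: "<s>", 1: "<pad>"}
    #     [id_to_token.get(i, "<unk>") for i in [0, 1, 99]]  ->  ["<s>", "<pad>", "<unk>"]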
| 707
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase__ :
def __init__( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=None , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase_ = OpenLlamaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = True
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
lowercase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
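    # Hedged pseudocode for the equivalence the slice comparison above asserts,
    # using a generic HF causal LM (names here are assumptions, not this test's API):
    #     full = model(torch.cat([ids, next_ids], -1)).hidden_states  # one full pass
    #     past = model(ids, use_cache=True).past_key_values
    #     inc = model(next_ids, past_key_values=past).hidden_states   # incremental pass
    #     assert torch.allclose(full[:, -k:], inc, atol=1e-3)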
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = OpenLlamaModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = """single_label_classification"""
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = """multi_label_classification"""
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ids_tensor([1, 10] , config.vocab_size )
lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
original_model.eval()
lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = {"""type""": scaling_type, """factor""": 10.0}
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
scaled_model.to(UpperCamelCase__ )
scaled_model.eval()
lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
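# Hedged sketch of "linear" RoPE scaling as exercised above: positions are divided by
# the scaling factor before the rotary angle is formed, so a model trained up to
# max_position_embeddings can address factor-times-longer inputs. Names and defaults
# below are illustrative, not the model's actual implementation.
def _demo_rope_angle(position, pair_index, head_dim=64, base=10000.0, factor=10.0):
    inv_freq = base ** (-2.0 * pair_index / head_dim)  # standard RoPE frequency
    return (position / factor) * inv_freq  # scaled angle fed to cos/sin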
| 650
| 0
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
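# Hedged numeric sketch (silicon-like values, illustrative only):
#     builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.4e10)
# returns roughly 0.82 (volts), since V_bi = (kT / q) * ln(Nd * Na / ni**2) and
# kT / q is about 25.85 mV at T = 300 K.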
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
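# Hedged note on the update implemented above: each parameter follows
#     theta[i] <- theta[i] - LEARNING_RATE * (1 / m) * sum_j error(j) * x_j[i]
# where x_j[0] is treated as the fixed bias input 1 -- which is why the loop calls
# get_cost_derivative(i - 1) and index == -1 marks the bias term.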
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 57
| 1
|
"""simple docstring"""
from maths.prime_factors import prime_factors
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
__A = f'Input value of [number={number}] must be an integer'
raise TypeError(__UpperCamelCase )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 215
|
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
return len(set(__UpperCamelCase ) ) == len(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 215
| 1
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE : Optional[int] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class snake_case ( lowercase_ ):
"""simple docstring"""
def __init__( self, _lowercase, _lowercase=False, _lowercase=True, _lowercase=False, _lowercase="<s>", _lowercase="</s>", _lowercase="<unk>", _lowercase="<sep>", _lowercase="<pad>", _lowercase="<cls>", _lowercase="<mask>", _lowercase=["<eop>", "<eod>"], _lowercase = None, **_lowercase, ) -> None:
SCREAMING_SNAKE_CASE_ = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else mask_token
SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase, remove_space=_lowercase, keep_accents=_lowercase, bos_token=_lowercase, eos_token=_lowercase, unk_token=_lowercase, sep_token=_lowercase, pad_token=_lowercase, cls_token=_lowercase, mask_token=_lowercase, additional_special_tokens=_lowercase, sp_model_kwargs=self.sp_model_kwargs, **_lowercase, )
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.' )
SCREAMING_SNAKE_CASE_ = jieba
SCREAMING_SNAKE_CASE_ = str.maketrans(' \n', '\u2582\u2583' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def a__ ( self ) -> Tuple:
return len(self.sp_model )
def a__ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ = None
return state
def __setstate__( self, _lowercase ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self, _lowercase ) -> Dict:
if self.remove_space:
SCREAMING_SNAKE_CASE_ = ' '.join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE_ = inputs
SCREAMING_SNAKE_CASE_ = outputs.replace('``', '"' ).replace('\'\'', '"' )
if not self.keep_accents:
SCREAMING_SNAKE_CASE_ = unicodedata.normalize('NFKD', _lowercase )
SCREAMING_SNAKE_CASE_ = ''.join([c for c in outputs if not unicodedata.combining(_lowercase )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ = outputs.lower()
return outputs
def a__ ( self, _lowercase ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.preprocess_text(_lowercase )
SCREAMING_SNAKE_CASE_ = self.sp_model.encode(_lowercase, out_type=_lowercase )
SCREAMING_SNAKE_CASE_ = []
for piece in pieces:
if len(_lowercase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase, '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE_ = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowercase )
else:
new_pieces.append(_lowercase )
return new_pieces
def a__ ( self, _lowercase ) -> str:
return self.sp_model.PieceToId(_lowercase )
def a__ ( self, _lowercase ) -> int:
return self.sp_model.IdToPiece(_lowercase )
def a__ ( self, _lowercase ) -> str:
SCREAMING_SNAKE_CASE_ = ''.join(_lowercase ).replace(_lowercase, ' ' ).strip()
return out_string
def a__ ( self, _lowercase, _lowercase = None ) -> List[int]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def a__ ( self, _lowercase, _lowercase = None, _lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase, token_ids_a=_lowercase, already_has_special_tokens=_lowercase )
if token_ids_a is not None:
return ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1, 1]
return ([0] * len(_lowercase )) + [1, 1]
def a__ ( self, _lowercase, _lowercase = None ) -> List[int]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def a__ ( self, _lowercase, _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowercase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase, 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
def a__ ( self, *_lowercase, **_lowercase ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = super()._decode(*_lowercase, **_lowercase )
SCREAMING_SNAKE_CASE_ = text.replace(' ', '' ).replace('\u2582', ' ' ).replace('\u2583', '\n' )
return text
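    # Hedged sketch of the whitespace round trip implemented by `translator` above
    # and undone in _decode: spaces and newlines become the placeholder glyphs
    # U+2582/U+2583 before SentencePiece sees the text, then are mapped back after:
    #     "a b\nc".translate(str.maketrans(" \n", "\u2582\u2583"))  ->  "a▂b▃c"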
| 294
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = ["""image_processor""", """tokenizer"""]
_a = """CLIPImageProcessor"""
_a = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self, _lowercase=None, _lowercase=None, **_lowercase ) -> int:
SCREAMING_SNAKE_CASE_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', _lowercase, )
SCREAMING_SNAKE_CASE_ = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_lowercase, _lowercase )
def __call__( self, _lowercase=None, _lowercase=None, _lowercase=None, **_lowercase ) -> str:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer(_lowercase, return_tensors=_lowercase, **_lowercase )
if images is not None:
SCREAMING_SNAKE_CASE_ = self.image_processor(_lowercase, return_tensors=_lowercase, **_lowercase )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ), tensor_type=_lowercase )
def a__ ( self, *_lowercase, **_lowercase ) -> Optional[int]:
return self.tokenizer.batch_decode(*_lowercase, **_lowercase )
def a__ ( self, *_lowercase, **_lowercase ) -> Optional[Any]:
return self.tokenizer.decode(*_lowercase, **_lowercase )
@property
def a__ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
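# Hedged usage sketch of the processor above (inputs are illustrative; the processor
# simply merges tokenizer output with the image processor's pixel_values):
#     enc = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#     sorted(enc.keys())  ->  ["attention_mask", "input_ids", "pixel_values"]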
| 294
| 1
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
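# Illustrative behavior (hypothetical inputs, not from the original file): with a
# generous max_tokens budget, pack_examples(tok, ['a b', 'c'], ['x', 'y'], 1024)
# greedily concatenates neighbouring examples and returns (['a b c'], ['x y']).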
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 680
|
'''simple docstring'''
def dodecahedron_surface_area(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
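# Worked check (added note): dodecahedron_surface_area(1) = 3 * sqrt(25 + 10 * sqrt(5))
# ≈ 20.6457 and dodecahedron_volume(1) = (15 + 7 * sqrt(5)) / 4 ≈ 7.6631, the standard
# closed forms for a regular dodecahedron of unit edge.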
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''spiece.model'''}
UpperCamelCase__ = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class a__ ( UpperCamelCase_ ):
def __init__( self : Optional[Any] ,a__ : Dict ,a__ : Union[str, Any]=False ,a__ : Union[str, Any]=True ,a__ : Dict=False ,a__ : Any="<s>" ,a__ : Any="</s>" ,a__ : Optional[int]="<unk>" ,a__ : int="<sep>" ,a__ : Tuple="<pad>" ,a__ : Any="<cls>" ,a__ : List[Any]="<mask>" ,a__ : List[str]=["<eop>", "<eod>"] ,a__ : Optional[Dict[str, Any]] = None ,**a__ : Optional[int] ,) -> None:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = AddedToken(a__ ,lstrip=a__ ,rstrip=a__) if isinstance(a__ ,a__) else mask_token
_lowerCAmelCase:Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a__ ,remove_space=a__ ,keep_accents=a__ ,bos_token=a__ ,eos_token=a__ ,unk_token=a__ ,sep_token=a__ ,pad_token=a__ ,cls_token=a__ ,mask_token=a__ ,additional_special_tokens=a__ ,sp_model_kwargs=self.sp_model_kwargs ,**a__ ,)
_lowerCAmelCase:List[Any] = 3
_lowerCAmelCase:Union[str, Any] = do_lower_case
_lowerCAmelCase:Dict = remove_space
_lowerCAmelCase:Union[str, Any] = keep_accents
_lowerCAmelCase:Dict = vocab_file
_lowerCAmelCase:Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(a__)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''')
_lowerCAmelCase:str = jieba
_lowerCAmelCase:List[Any] = str.maketrans(''' \n''' ,'''\u2582\u2583''')
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __UpperCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model)
def __UpperCamelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = {self.convert_ids_to_tokens(a__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Union[str, Any]) -> Any:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = self.__dict__.copy()
_lowerCAmelCase:Any = None
return state
def __setstate__( self : List[str] ,a__ : Tuple) -> Any:
"""simple docstring"""
_lowerCAmelCase:Any = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs'''):
_lowerCAmelCase:str = {}
_lowerCAmelCase:List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCamelCase ( self : Dict ,a__ : Union[str, Any]) -> Any:
"""simple docstring"""
if self.remove_space:
_lowerCAmelCase:List[str] = ''' '''.join(inputs.strip().split())
else:
_lowerCAmelCase:str = inputs
_lowerCAmelCase:int = outputs.replace('''``''' ,'''"''').replace('''\'\'''' ,'''"''')
if not self.keep_accents:
_lowerCAmelCase:Any = unicodedata.normalize('''NFKD''' ,a__)
_lowerCAmelCase:Dict = ''''''.join([c for c in outputs if not unicodedata.combining(a__)])
if self.do_lower_case:
_lowerCAmelCase:Dict = outputs.lower()
return outputs
def __UpperCamelCase ( self : str ,a__ : str) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:int = self.preprocess_text(a__)
_lowerCAmelCase:int = self.sp_model.encode(a__ ,out_type=a__)
_lowerCAmelCase:Tuple = []
for piece in pieces:
if len(a__) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
_lowerCAmelCase:Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(a__ ,''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
_lowerCAmelCase:str = cur_pieces[1:]
else:
_lowerCAmelCase:Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(a__)
else:
new_pieces.append(a__)
return new_pieces
def __UpperCamelCase ( self : int ,a__ : Any) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.PieceToId(a__)
def __UpperCamelCase ( self : Union[str, Any] ,a__ : int) -> Tuple:
"""simple docstring"""
return self.sp_model.IdToPiece(a__)
def __UpperCamelCase ( self : Dict ,a__ : Any) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = ''''''.join(a__).replace(a__ ,''' ''').strip()
return out_string
def __UpperCamelCase ( self : Union[str, Any] ,a__ : List[int] ,a__ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = [self.sep_token_id]
_lowerCAmelCase:Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCamelCase ( self : Tuple ,a__ : List[int] ,a__ : Optional[List[int]] = None ,a__ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ ,token_ids_a=a__ ,already_has_special_tokens=a__)
if token_ids_a is not None:
return ([0] * len(a__)) + [1] + ([0] * len(a__)) + [1, 1]
return ([0] * len(a__)) + [1, 1]
def __UpperCamelCase ( self : Optional[int] ,a__ : List[int] ,a__ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_lowerCAmelCase:List[Any] = [self.sep_token_id]
_lowerCAmelCase:List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __UpperCamelCase ( self : Dict ,a__ : str ,a__ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a__):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
_lowerCAmelCase:List[str] = os.path.join(
a__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(a__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,a__)
elif not os.path.isfile(self.vocab_file):
with open(a__ ,'''wb''') as fi:
_lowerCAmelCase:Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(a__)
return (out_vocab_file,)
def __UpperCamelCase ( self : Dict ,*a__ : str ,**a__ : Optional[int]) -> str:
"""simple docstring"""
_lowerCAmelCase:List[str] = super()._decode(*a__ ,**a__)
_lowerCAmelCase:List[str] = text.replace(''' ''' ,'''''').replace('''\u2582''' ,''' ''').replace('''\u2583''' ,'''\n''')
return text
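# Note (added for clarity): CpmTokenizer translates ' ' -> '\u2582' and '\n' -> '\u2583'
# before SentencePiece encoding (see the str.maketrans table built in __init__), and
# the _decode override above reverses that mapping so whitespace round-trips exactly.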
| 227
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {}
class a__ ( UpperCamelCase_ ):
snake_case__ = '''llama'''
snake_case__ = ['''past_key_values''']
def __init__( self : str ,a__ : Union[str, Any]=3_2000 ,a__ : Any=4096 ,a__ : int=1_1008 ,a__ : int=32 ,a__ : Optional[Any]=32 ,a__ : List[Any]=None ,a__ : List[Any]="silu" ,a__ : Union[str, Any]=2048 ,a__ : Any=0.02 ,a__ : Any=1E-6 ,a__ : int=True ,a__ : Optional[int]=0 ,a__ : Any=1 ,a__ : Any=2 ,a__ : str=1 ,a__ : str=False ,a__ : Union[str, Any]=None ,**a__ : List[Any] ,) -> str:
"""simple docstring"""
_lowerCAmelCase:Tuple = vocab_size
_lowerCAmelCase:Optional[int] = max_position_embeddings
_lowerCAmelCase:int = hidden_size
_lowerCAmelCase:Dict = intermediate_size
_lowerCAmelCase:List[Any] = num_hidden_layers
_lowerCAmelCase:List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase:List[Any] = num_attention_heads
_lowerCAmelCase:Any = num_key_value_heads
_lowerCAmelCase:Union[str, Any] = hidden_act
_lowerCAmelCase:int = initializer_range
_lowerCAmelCase:Any = rms_norm_eps
_lowerCAmelCase:Optional[Any] = pretraining_tp
_lowerCAmelCase:str = use_cache
_lowerCAmelCase:Union[str, Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a__ ,bos_token_id=a__ ,eos_token_id=a__ ,tie_word_embeddings=a__ ,**a__ ,)
def __UpperCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,a__) or len(self.rope_scaling) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F'got {self.rope_scaling}')
_lowerCAmelCase:Optional[Any] = self.rope_scaling.get('''type''' ,a__)
_lowerCAmelCase:Any = self.rope_scaling.get('''factor''' ,a__)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
if rope_scaling_factor is None or not isinstance(a__ ,a__) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}')
| 227
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
__UpperCAmelCase = '▁'
class _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'token_type_ids']
A = FNetTokenizer
def __init__( self ,__SCREAMING_SNAKE_CASE=None ,__SCREAMING_SNAKE_CASE=None ,__SCREAMING_SNAKE_CASE=False ,__SCREAMING_SNAKE_CASE=True ,__SCREAMING_SNAKE_CASE=True ,__SCREAMING_SNAKE_CASE="<unk>" ,__SCREAMING_SNAKE_CASE="[SEP]" ,__SCREAMING_SNAKE_CASE="<pad>" ,__SCREAMING_SNAKE_CASE="[CLS]" ,__SCREAMING_SNAKE_CASE="[MASK]" ,**__SCREAMING_SNAKE_CASE ,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE : List[str] = (
AddedToken(__SCREAMING_SNAKE_CASE ,lstrip=__SCREAMING_SNAKE_CASE ,rstrip=__SCREAMING_SNAKE_CASE ,normalized=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
else mask_token
)
super().__init__(
__SCREAMING_SNAKE_CASE ,tokenizer_file=__SCREAMING_SNAKE_CASE ,do_lower_case=__SCREAMING_SNAKE_CASE ,remove_space=__SCREAMING_SNAKE_CASE ,keep_accents=__SCREAMING_SNAKE_CASE ,unk_token=__SCREAMING_SNAKE_CASE ,sep_token=__SCREAMING_SNAKE_CASE ,pad_token=__SCREAMING_SNAKE_CASE ,cls_token=__SCREAMING_SNAKE_CASE ,mask_token=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
SCREAMING_SNAKE_CASE : List[Any] = do_lower_case
SCREAMING_SNAKE_CASE : Dict = remove_space
SCREAMING_SNAKE_CASE : int = keep_accents
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_file
SCREAMING_SNAKE_CASE : Tuple = False if not self.vocab_file else True
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
__SCREAMING_SNAKE_CASE ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file ,__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 706
|
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, a valid series looks like [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non-empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, a valid series looks like [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non-empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
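# Usage sketch (illustrative values): is_arithmetic_series([2, 4, 6]) is True, since
# the common difference is 2, and arithmetic_mean([2, 4, 6]) returns (2 + 4 + 6) / 3 = 4.0.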
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220
| 0
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
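# Worked example of the mapping above (assuming standard SD 1.x key layouts): the HF
# Diffusers key 'down_blocks.0.resnets.0.norm1.weight' first becomes
# 'down_blocks.0.resnets.0.in_layers.0.weight' via the resnet substitutions, then
# 'input_blocks.1.0.in_layers.0.weight' via the layer-prefix substitutions.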
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
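# For example (added note): a linear attention weight of shape (C_out, C_in) is
# reshaped to (C_out, C_in, 1, 1), i.e. an equivalent 1x1 convolution kernel, which
# is the parameter shape the original SD VAE attention blocks store.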
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight')
            or k.endswith('.self_attn.k_proj.weight')
            or k.endswith('.self_attn.v_proj.weight')
        ):
            k_pre = k[: -len('.q_proj.weight')]
            k_code = k[-len('q_proj.weight')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias')
            or k.endswith('.self_attn.k_proj.bias')
            or k.endswith('.self_attn.v_proj.bias')
        ):
            k_pre = k[: -len('.q_proj.bias')]
            k_code = k[-len('q_proj.bias')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
__a = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
__a = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
__a = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
__a = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
__a = load_file(unet_path, device='cpu')
else:
__a = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
__a = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
__a = load_file(vae_path, device='cpu')
else:
__a = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
__a = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
__a = load_file(text_enc_path, device='cpu')
else:
__a = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
__a = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
__a = convert_unet_state_dict(unet_state_dict)
__a = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
__a = convert_vae_state_dict(vae_state_dict)
__a = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
__a = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
__a = {'transformer.' + k: v for k, v in text_enc_dict.items()}
__a = convert_text_enc_state_dict_vaa(text_enc_dict)
__a = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
__a = convert_text_enc_state_dict(text_enc_dict)
__a = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
__a = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
__a = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
__a = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 97
|
"""simple docstring"""
import numpy as np
import datasets
a_ = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
a_ = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
a_ = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def _lowerCamelCase ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
# convert to numpy arrays
__lowercase : Dict = np.array(UpperCamelCase_ )
__lowercase : str = np.array(UpperCamelCase_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
__lowercase : Tuple = X - np.mean(UpperCamelCase_ )
__lowercase : List[Any] = np.cov(reference_distribution.T )
try:
__lowercase : Tuple = np.linalg.inv(UpperCamelCase_ )
except np.linalg.LinAlgError:
__lowercase : str = np.linalg.pinv(UpperCamelCase_ )
__lowercase : Any = np.dot(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : Optional[Any] = np.dot(UpperCamelCase_ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 76
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def UpperCamelCase ( self ):
A__ = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
A__ = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]],dtype=tf.intaa,) # J'aime le camembert !
A__ = model(__lowerCAmelCase )['''last_hidden_state''']
A__ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape,__lowerCAmelCase )
# compare the actual values for a slice.
A__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],dtype=tf.floataa,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy(),expected_slice.numpy(),atol=1E-4 ) )
| 710
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast tokenizer, update your `tokenizers` library and re-run the tokenizer conversion'
)
LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'7B': 11_008,
'13B': 13_824,
'30B': 17_920,
'65B': 22_016,
'70B': 28_672,
}
a__: List[str] = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
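# Worked example (using the published LLaMA-7B shapes): with n = 4096 and the default
# multiplier, int(8 * 4096 / 3) = 10922 rounds up to the next multiple of 256, giving
# 11008 -- the '7B' entry in the intermediate-size table above.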
def read_json(path):
    with open(path, 'r') as f:
        return json.load(f)
def write_json(text, path):
    with open(path, 'w') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
A__ = os.path.join(UpperCamelCase__ , '''tmp''' )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
A__ = read_json(os.path.join(UpperCamelCase__ , '''params.json''' ) )
A__ = NUM_SHARDS[model_size]
A__ = params['''n_layers''']
A__ = params['''n_heads''']
A__ = n_heads // num_shards
A__ = params['''dim''']
A__ = dim // n_heads
A__ = 10000.0
A__ = 1.0 / (base ** (torch.arange(0 , UpperCamelCase__ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
A__ = params['''n_kv_heads'''] # for GQA / MQA
A__ = n_heads_per_shard // num_key_value_heads
A__ = dim // num_key_value_heads
else: # compatibility with other checkpoints
A__ = n_heads
A__ = n_heads_per_shard
A__ = dim
# permute for sliced rotary
def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
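# Note (added for clarity): this permutation converts Meta's interleaved rotary
# layout (adjacent even/odd rotary dims per head) into the half-split layout that
# HF's Llama attention expects; it only reorders rows of the q/k projection
# weights, so the attention outputs are numerically unchanged.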
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
A__ = torch.load(os.path.join(UpperCamelCase__ , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
A__ = [
torch.load(os.path.join(UpperCamelCase__ , f"consolidated.{i:02d}.pth" ) , map_location='''cpu''' )
for i in range(UpperCamelCase__ )
]
A__ = 0
A__ = {'''weight_map''': {}}
for layer_i in range(UpperCamelCase__ ):
A__ = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
A__ = {
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
A__ = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for i in range(UpperCamelCase__ )
] , dim=0 , ).reshape(UpperCamelCase__ , UpperCamelCase__ ) )
A__ = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for i in range(UpperCamelCase__ )
] , dim=0 , ).reshape(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
A__ = torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for i in range(UpperCamelCase__ )
] , dim=0 , ).reshape(UpperCamelCase__ , UpperCamelCase__ )
A__ = torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(UpperCamelCase__ )] , dim=1 )
A__ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(UpperCamelCase__ )] , dim=0 )
A__ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(UpperCamelCase__ )] , dim=1 )
A__ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(UpperCamelCase__ )] , dim=0 )
A__ = inv_freq
for k, v in state_dict.items():
A__ = filename
param_count += v.numel()
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
A__ = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
A__ = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(UpperCamelCase__ )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(UpperCamelCase__ )] , dim=0 ),
}
for k, v in state_dict.items():
A__ = filename
param_count += v.numel()
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
# Write configs
A__ = {'''total_size''': param_count * 2}
write_json(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''pytorch_model.bin.index.json''' ) )
A__ = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
A__ = params['''multiple_of'''] if '''multiple_of''' in params else 2_56
A__ = LlamaConfig(
hidden_size=UpperCamelCase__ , intermediate_size=compute_intermediate_size(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=UpperCamelCase__ , )
config.save_pretrained(UpperCamelCase__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
A__ = LlamaForCausalLM.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa , low_cpu_mem_usage=UpperCamelCase__ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(UpperCamelCase__ , safe_serialization=UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders', )
    parser.add_argument(
        '--model_size', choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'], )
    parser.add_argument(
        '--output_dir', help='Location to write HF model and tokenizer', )
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 212
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = IFInpaintingPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_dummy_components()
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 4
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( A__ ):
UpperCAmelCase__ = (DDPMScheduler,)
def lowerCamelCase_ ( self :Optional[Any] , **_lowerCamelCase :Any ):
'''simple docstring'''
UpperCamelCase_ : str ={
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_lowerCamelCase )
return config
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def lowerCamelCase_ ( self :List[str] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=_lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : str =self.scheduler_classes[0]
UpperCamelCase_ : Any =self.get_scheduler_config()
UpperCamelCase_ : Any =scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
UpperCamelCase_ : Tuple =self.scheduler_classes[0]
UpperCamelCase_ : List[Any] =self.get_scheduler_config()
UpperCamelCase_ : Tuple =scheduler_class(**_lowerCamelCase )
UpperCamelCase_ : Dict =len(_lowerCamelCase )
UpperCamelCase_ : int =self.dummy_model()
UpperCamelCase_ : Dict =self.dummy_sample_deter
UpperCamelCase_ : str =torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
UpperCamelCase_ : Optional[int] =model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
UpperCamelCase_ : Tuple =scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase_ : List[Any] =pred_prev_sample
UpperCamelCase_ : Union[str, Any] =torch.sum(torch.abs(_lowerCamelCase ) )
UpperCamelCase_ : Dict =torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
UpperCamelCase_ : Any =self.scheduler_classes[0]
UpperCamelCase_ : Optional[int] =self.get_scheduler_config(prediction_type='v_prediction' )
UpperCamelCase_ : Dict =scheduler_class(**_lowerCamelCase )
UpperCamelCase_ : Optional[Any] =len(_lowerCamelCase )
UpperCamelCase_ : Tuple =self.dummy_model()
UpperCamelCase_ : List[Any] =self.dummy_sample_deter
UpperCamelCase_ : Tuple =torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
UpperCamelCase_ : List[Any] =model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
UpperCamelCase_ : int =scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase_ : List[Any] =pred_prev_sample
UpperCamelCase_ : Tuple =torch.sum(torch.abs(_lowerCamelCase ) )
UpperCamelCase_ : Any =torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =self.scheduler_classes[0]
UpperCamelCase_ : List[str] =self.get_scheduler_config()
UpperCamelCase_ : Dict =scheduler_class(**_lowerCamelCase )
UpperCamelCase_ : str =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCamelCase )
UpperCamelCase_ : Optional[int] =scheduler.timesteps
for i, timestep in enumerate(_lowerCamelCase ):
if i == len(_lowerCamelCase ) - 1:
UpperCamelCase_ : int =-1
else:
UpperCamelCase_ : Dict =timesteps[i + 1]
UpperCamelCase_ : List[str] =scheduler.previous_timestep(_lowerCamelCase )
UpperCamelCase_ : Optional[int] =prev_t.item()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self.scheduler_classes[0]
UpperCamelCase_ : Dict =self.get_scheduler_config()
UpperCamelCase_ : Optional[Any] =scheduler_class(**_lowerCamelCase )
UpperCamelCase_ : Any =[100, 87, 50, 51, 0]
with self.assertRaises(_lowerCamelCase , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =self.scheduler_classes[0]
UpperCamelCase_ : List[str] =self.get_scheduler_config()
UpperCamelCase_ : List[Any] =scheduler_class(**_lowerCamelCase )
UpperCamelCase_ : Any =[100, 87, 50, 1, 0]
UpperCamelCase_ : Dict =len(_lowerCamelCase )
with self.assertRaises(_lowerCamelCase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : int =self.scheduler_classes[0]
UpperCamelCase_ : Tuple =self.get_scheduler_config()
UpperCamelCase_ : Optional[Any] =scheduler_class(**_lowerCamelCase )
UpperCamelCase_ : Dict =[scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowerCamelCase , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
| 395
|
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * pow(2, 7_830_457, modulus) + 1
    return str(number % modulus)
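# Why this works (added note): only the last `n` digits are needed, so the
# three-argument pow(2, 7_830_457, modulus) performs modular exponentiation instead
# of materialising the roughly 2.36-million-digit power of two.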
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 395
| 1
|
"""simple docstring"""
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 260
|
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither bound is a root and f(a) and f(b) have the same sign,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the bracketing interval is smaller than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1_000))
    import doctest
    doctest.testmod()
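# Sanity check (added note, not part of the original file): f(x) = x**3 - 2*x - 5
# has a single real root, and bisection(f, 1, 1_000) converges to ~2.0945514815.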
| 260
| 1
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
_torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
_tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
lowerCAmelCase : Optional[int] = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
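
# Minimal usage sketch (added illustration, not part of the original module):
# aliases resolve to their registered format types before instantiation, e.g.
#   get_formatter("np")  -> NumpyFormatter instance  (alias "np" maps to "numpy")
#   get_formatter("pt")  -> TorchFormatter instance  (only if PyTorch is installed)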
| 716
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"))
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10)
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"))
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"))
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0])
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0])
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 353
| 0
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase_ = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0))
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0))
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 384
|
'''simple docstring'''
from math import factorial
UpperCamelCase_ = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
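    # Quick sanity check (added illustration): 145 is a factorion,
    # since 1! + 4! + 5! = 1 + 24 + 120 = 145.
    assert digit_factorial_sum(145) == 145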
| 384
| 1
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 333
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 90
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        self.num_patches = (self.image_size // 32) ** 2
        self.seq_length = self.num_patches + 1
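        # Worked example (using the defaults above): image_size=64 gives
        # (64 // 32) ** 2 = 4 patches, so seq_length = 4 + 1 = 5 once the
        # [CLS] token is counted.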
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 247
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase : Optional[Any] = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
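    # Minimal usage sketch (added illustration, not part of the original module):
    stack = LinkedStack()
    stack.push(1)
    stack.push(2)
    assert str(stack) == "2->1" and stack.pop() == 2 and stack.peek() == 1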
| 453
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
        cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 453
| 1
|
def solution(n: int = 2_000_000) -> int:
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"{solution() = }")
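    # Quick sanity check (added illustration): primes below 10 sum to 2 + 3 + 5 + 7 = 17.
    assert solution(10) == 17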
| 113
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
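
# Minimal usage sketch (added illustration, not part of the original module):
#   config = DeformableDetrConfig()
#   assert config.hidden_size == config.d_model == 256   # mapped attribute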
| 468
| 0
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Stores two signals and performs their circular convolution via the matrix method.
    """

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
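    # Cross-check sketch (added illustration, not part of the original module):
    # pointwise multiplication of FFTs corresponds to circular convolution, so
    # the frequency-domain result should match the matrix method: [10, 10, 6, 14].
    fft_result = np.real(np.fft.ifft(np.fft.fft([2, 1, 2, -1]) * np.fft.fft([1, 2, 3, 4])))
    print([round(float(x), 2) for x in fft_result])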
| 441
|
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
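    # Quick sanity check (added illustration): the sequence starts 2, 3, 7, 43,
    # since each term is one more than the product of all previous terms.
    assert [sylvester(i) for i in range(1, 5)] == [2, 3, 7, 43]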
| 441
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
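
# Minimal usage sketch (added illustration; assumes network access to the
# "albert-base-v2" checkpoint on the Hugging Face Hub):
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   tokenizer.tokenize("Hello world!")  # lowercased sentencepiece pieces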
| 103
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :str ) -> Union[str, Any]:
# word like '180' or '身高' or '神'
for char in word:
a_ : Union[str, Any] = ord(_SCREAMING_SNAKE_CASE )
if not _is_chinese_char(_SCREAMING_SNAKE_CASE ):
return 0
return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collects the multi-character Chinese words from a list of segmented tokens."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefixes '##' to BERT tokens that continue a whole Chinese word from `chinese_word_set`."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
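
# A small worked example (illustrative values): with LTP segmenting "中国" as one
# word, add_sub_symbol(["中", "国", "人"], {"中国"}) returns ["中", "##国", "人"];
# the continuation character is marked so whole-word masking can mask "中国" jointly.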
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    """For each line, returns the positions of BERT subwords that continue a whole Chinese word."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
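
# Shape of the output (hypothetical): for a line whose BERT tokens come out as
# ["[CLS]", "中", "##国", "人", "[SEP]"], prepare_ref records [2] for that line --
# only the index of the "##"-continued Chinese subword is saved.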
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, project_dim=768, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
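
# Minimal usage sketch (illustrative; values match the defaults above):
#   config = AltCLIPConfig(projection_dim=768)
#   config.text_config.hidden_size   # 1024
#   config.vision_config.image_size  # 224
# to_dict() round-trips both sub-configs so the composite config can be serialized.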
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    """Fetches `limit` posts from a subreddit, optionally keeping only the `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
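
# Shape of the result (hypothetical values): with wanted_data=["title", "url"],
# get_subreddit_data("learnpython", limit=2, wanted_data=["title", "url"]) returns
# {0: {"title": ..., "url": ...}, 1: {"title": ..., "url": ...}}.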
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_cache_conversions(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)
            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
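
# Minimal usage sketch (illustrative; from_pretrained assumes network or cached files):
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tokenizer("Hello world")["input_ids"]  # [CLS] ... [SEP] ids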
"""Prim's minimum spanning tree algorithm using a custom binary heap."""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Computes a minimum spanning tree of the graph using Prim's algorithm."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
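
# A small worked example (illustrative): for the undirected graph with edges
# 0-1 (weight 1), 0-2 (weight 3), 1-2 (weight 1), i.e.
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
# prisms_algorithm(adjacency_list) returns [(0, 1), (1, 2)], total weight 2.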
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
"""Finds the longest non-decreasing subsequence of a list (recursive)."""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Returns the longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
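
# Example of the expected behavior (non-decreasing subsequence):
#   longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
#   -> [10, 22, 33, 41, 60, 80]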
if __name__ == "__main__":
import doctest
doctest.testmod()
"""Checks that the auto-generated model lists in the task guides are up to date."""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Finds the block of text between `start_prompt` and `end_prompt` in `filename`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    """Returns the markdown list of models supporting a given task guide."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Checks (and optionally fixes) the auto-generated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
"""Updates the version dropdown in the docs' custom.js at release time."""
import argparse
JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Updates the stable version and the version mapping in custom.js."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
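
# The script assumes custom.js contains lines of this shape (illustrative):
#   const stableVersion = "v4.28.0"
#   const versionMapping = {
#       "": "v4.28.0 (stable)",
#       ...
#   }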
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
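
# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio is
# 5 * 2**6 = 320: one encoder frame per 320 waveform samples (20 ms at 16 kHz).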
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
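
# Illustrative mapping step: a fairseq key like "encoder.layers.3.self_attn.k_proj.weight"
# matches MAPPING's "self_attn.k_proj" and is rewritten to
# "unispeech.encoder.layers.3.attention.k_proj" with weight_type "weight".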
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
'''simple docstring'''
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
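    # Illustrative invocation (a sketch only; the script name and all paths
    # below are placeholders, not real checkpoints):
    #
    #   python convert_unispeech_checkpoint.py \
    #       --checkpoint_path ./unispeech.pt \
    #       --dict_path ./dict.ltr.txt \
    #       --pytorch_dump_folder_path ./unispeech-hf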
| 177
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "poolformer"
    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        '''simple docstring'''
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self):
'''simple docstring'''
return 2e-3
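# Minimal usage sketch for the ONNX config above (no export is run here, and
# the printed values simply echo the properties defined in the class):
#
#   config = PoolFormerConfig()
#   onnx_config = PoolFormerOnnxConfig(config)
#   print(onnx_config.inputs)               # pixel_values axis mapping
#   print(onnx_config.atol_for_validation)  # 0.002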
| 177
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
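    # Illustrative invocation (a sketch; the paths are placeholders):
    #
    #   python convert_mobilebert_checkpoint.py \
    #       --tf_checkpoint_path ./mobilebert_ckpt \
    #       --mobilebert_config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin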
| 719
|
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """simple docstring"""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
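    # Illustrative check (the grid below is made up for the sketch): an open
    # 2x2 grid has exactly two simple paths from (0, 0) to (1, 1).
    example_grid = [[0, 0], [0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # expected: 2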
| 508
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 410
|
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 365
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    '''simple docstring'''
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 601
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE__ = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        '''simple docstring'''
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        '''simple docstring'''
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        '''simple docstring'''
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason="Model has no tokens embeddings" )
    def test_resize_tokens_embeddings(self):
'''simple docstring'''
pass
    def test_model_main_input_name(self):
        '''simple docstring'''
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
lowercase_ = getattr(self.model_tester , "seq_length" , UpperCAmelCase )
lowercase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCAmelCase )
lowercase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCAmelCase )
lowercase_ = getattr(self.model_tester , "d_model" , UpperCAmelCase )
lowercase_ = getattr(self.model_tester , "num_attention_heads" , UpperCAmelCase )
lowercase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowercase_ = True
lowercase_ = False
lowercase_ = True
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ = True
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowercase_ = len(UpperCAmelCase )
lowercase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# decoder attentions
lowercase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowercase_ = outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowercase_ = True
lowercase_ = True
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + 2 , len(UpperCAmelCase ) )
lowercase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    '''simple docstring'''
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def test_inference_no_head(self):
'''simple docstring'''
lowercase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCAmelCase )
lowercase_ = prepare_batch()
with torch.no_grad():
lowercase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
lowercase_ = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCAmelCase )
lowercase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
    def test_inference_head(self):
'''simple docstring'''
lowercase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCAmelCase )
lowercase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
lowercase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
lowercase_ = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCAmelCase )
lowercase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
    def test_seq_to_seq_generation(self):
'''simple docstring'''
lowercase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCAmelCase )
lowercase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
lowercase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
lowercase_ = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCAmelCase )
lowercase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCAmelCase )
lowercase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCAmelCase , rtol=1e-1 ) )
| 601
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 160
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
A: Tuple = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 160
| 1
|
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
with open(log, 'r') as f:
for line in f:
            line = json.loads(line)
if line.get('nodeid', '') != "":
                test = line['nodeid']
if line.get('duration', None) is not None:
                    duration = f"""{line['duration']:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
    message = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
        ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ''
                payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 283
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
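# Quick sanity sketch for the helper above: printable ASCII bytes map to
# themselves (e.g. bytes_to_unicode()[ord("A")] == "A"), while non-printable
# bytes are shifted into unused code points above 2**8.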
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
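# Illustration (a sketch): for the symbol tuple ("h", "e", "l", "l", "o") the
# helper returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.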
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """simple docstring"""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
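# Minimal usage sketch for the tokenizer above (file names are placeholders;
# a real vocab.json/merges.txt pair is required):
#
#   tokenizer = BartTokenizer("vocab.json", "merges.txt")
#   print(tokenizer.tokenize("Hello world"))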
| 283
| 1
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
 pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """simple docstring"""

    def remove_articles(text):
        regex = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
        return re.sub(regex, ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
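# Illustration (a sketch): lower-casing, punctuation/article removal and
# whitespace collapsing, e.g.
#   normalize_answer("The  Quick, Brown Fox!")  ->  "quick brown fox"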
def compute_exact(a_gold, a_pred):
    """simple docstring"""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """simple docstring"""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
"""simple docstring"""
_a : Optional[Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
_a : Any = Counter(__a )
_a : Dict = Counter(__a )
_a : Tuple = Counter()
for sgram, scount in sgramcounter.items():
_a : List[str] = scount * numref
_a : Optional[Any] = Counter(__a )
_a : Dict = Counter()
for cgram, ccount in cgramcounter.items():
_a : int = ccount * numref
# KEEP
_a : Optional[Any] = sgramcounter_rep & cgramcounter_rep
_a : Dict = keepgramcounter_rep & rgramcounter
_a : str = sgramcounter_rep & rgramcounter
_a : Any = 0
_a : Optional[Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a : List[str] = 1
_a : List[str] = 1
if len(__a ) > 0:
_a : Any = keeptmpscorea / len(__a )
if len(__a ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a : Optional[int] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a : List[Any] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a : str = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a : Tuple = sgramcounter_rep - cgramcounter_rep
_a : Tuple = delgramcounter_rep - rgramcounter
_a : List[Any] = sgramcounter_rep - rgramcounter
_a : int = 0
_a : Union[str, Any] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a : int = 1
if len(__a ) > 0:
_a : Any = deltmpscorea / len(__a )
# ADDITION
_a : Union[str, Any] = set(__a ) - set(__a )
_a : str = set(__a ) & set(__a )
_a : Optional[int] = set(__a ) - set(__a )
_a : Dict = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a : Optional[int] = 1
_a : str = 1
if len(__a ) > 0:
_a : List[Any] = addtmpscore / len(__a )
if len(__a ) > 0:
_a : Optional[Any] = addtmpscore / len(__a )
_a : Dict = 0
if addscore_precision > 0 or addscore_recall > 0:
_a : Optional[Any] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
"""simple docstring"""
_a : List[Any] = len(__a )
_a : int = ssent.split(''' ''' )
_a : List[Any] = csent.split(''' ''' )
_a : List[Any] = []
_a : Tuple = []
_a : Union[str, Any] = []
_a : Dict = []
_a : Dict = []
_a : Any = []
_a : List[Any] = []
_a : List[Any] = []
_a : List[Any] = []
_a : Optional[int] = []
for rsent in rsents:
_a : Dict = rsent.split(''' ''' )
_a : Optional[Any] = []
_a : Tuple = []
_a : Optional[Any] = []
ragramslist.append(__a )
for i in range(0 ,len(__a ) - 1 ):
if i < len(__a ) - 1:
_a : Any = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(__a )
if i < len(__a ) - 2:
_a : List[Any] = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(__a )
if i < len(__a ) - 3:
_a : List[str] = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(__a )
ragramslist.append(__a )
ragramslist.append(__a )
ragramslist.append(__a )
for i in range(0 ,len(__a ) - 1 ):
if i < len(__a ) - 1:
_a : str = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(__a )
if i < len(__a ) - 2:
_a : Optional[int] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(__a )
if i < len(__a ) - 3:
_a : List[str] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(__a )
for i in range(0 ,len(__a ) - 1 ):
if i < len(__a ) - 1:
_a : str = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(__a )
if i < len(__a ) - 2:
_a : int = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(__a )
if i < len(__a ) - 3:
_a : Optional[int] = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(__a )
((_a) , (_a) , (_a)) : Union[str, Any] = SARIngram(__a ,__a ,__a ,__a )
((_a) , (_a) , (_a)) : Tuple = SARIngram(__a ,__a ,__a ,__a )
((_a) , (_a) , (_a)) : int = SARIngram(__a ,__a ,__a ,__a )
((_a) , (_a) , (_a)) : List[str] = SARIngram(__a ,__a ,__a ,__a )
_a : List[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a : List[str] = sum([delascore, delascore, delascore, delascore] ) / 4
_a : Optional[Any] = sum([addascore, addascore, addascore, addascore] ) / 4
_a : int = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """simple docstring"""
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    """simple docstring"""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('''Sources length must match predictions and references lengths.''')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """simple docstring"""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'''exact''': compute_em(predictions=predictions, references=references)})
        return result
| 14
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string):
        """Parses the literal strings "True"/"False" into booleans for argparse."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
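# Example invocation (hypothetical file names, shown for illustration only):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path control_sd15_canny.pth \
#       --original_config_file cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors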
| 14
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV2 does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 319
|
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Returns A(divisor): the length of the smallest repunit divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
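# Worked check: modulo 7 the repunits cycle through 1, 4, 6, 5, 2, 0, so
# least_divisible_repunit(7) == 6, i.e. R(6) = 111111 = 7 * 15873 is the first
# repunit divisible by 7.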
def solution(limit: int = 1_000_000) -> int:
    """Returns the least odd divisor whose A(divisor) first exceeds the limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 319
| 1
|
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 65
|
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: runs the wrapped callable eagerly or inside tf.function."""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
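# Minimal usage sketch (the model call below is my own illustration):
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
#
# With do_eager_mode=False the call is wrapped in
# tf.function(experimental_compile=use_xla); with do_eager_mode=True it runs
# eagerly, and combining eager mode with XLA raises a ValueError.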
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times first to stabilize compilation for TPU/XLA
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 347
| 0
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False, False, False
@dataclass
class Audio:
    """Audio feature storing either raw bytes or a path to an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self ):
return self.pa_type
    def encode_example(self, value) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
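# Hedged usage sketch (the file name is a placeholder, not from this module):
#
#   feat = Audio(sampling_rate=16_000)
#   encoded = feat.encode_example("path/to/clip.wav")  # {"bytes": None, "path": ...}
#   decoded = feat.decode_example(encoded)             # {"path", "array", "sampling_rate"}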
| 297
|
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Returns the Liouville function lambda(n): (-1) raised to the number of
    prime factors of n, counted with multiplicity."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
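# Quick check: prime_factors(6) == [2, 3], an even count, so liouville_lambda(6)
# returns 1; prime_factors(8) == [2, 2, 2], an odd count, so liouville_lambda(8)
# returns -1.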
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297
| 1
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCAmelCase_ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        """Serializes this instance, converting nested `GenerationConfig` values to plain dicts."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
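# Hedged usage sketch (argument values are illustrative only):
#
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)
#   args.to_dict()  # any GenerationConfig value is serialized to a plain dict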
| 527
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='''Bit does not output attentions''')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason='''Bit does not use feedforward chunking''')
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 691
| 0
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree)
        )
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 721
|
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ''
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(' ', '%20')
        filename = os.path.splitext(filename.replace('_', ' ').title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
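# Example output shape (illustrative only), for a tree containing maths/prime_factors.py:
#
#   ## Maths
#     * [Prime Factors](maths/prime_factors.py)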
if __name__ == "__main__":
print_directory_md('''.''')
| 4
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : int = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 405
|
'''simple docstring'''
def solution() -> int:
    """Returns the product a*b*c of the unique Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
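# Sanity check: the unique triplet is (200, 375, 425); 200 + 375 + 425 == 1000
# and 200**2 + 375**2 == 425**2, so solution() == 31875000.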
if __name__ == "__main__":
print(f'''{solution() = }''')
| 405
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
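# Typical invocations once installed as the `transformers-cli` entry point
# (illustrative; each registered subcommand documents its own flags):
#
#   transformers-cli env
#   transformers-cli download bert-base-uncased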
| 366
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    # Target attribute names follow the SpeechT5HifiGan module layout in
    # transformers (conv_pre / upsampler / resblocks / conv_post).
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
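# Example invocation, assuming this file is saved as convert_hifigan_checkpoint.py
# (all paths are placeholders):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan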
| 366
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
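# Quick sketch of the attribute_map indirection (illustrative):
#
#   config = PegasusConfig()
#   assert config.hidden_size == config.d_model == 1024
#   assert config.num_attention_heads == config.encoder_attention_heads == 16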
| 99
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setUp(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 99
| 1
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
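# Example launch, assuming this file is saved as gradient_accumulation.py and
# `accelerate config` has been run once on the machine:
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2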
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
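# Example invocation (the file name and all paths are placeholders):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path xlnet_model.ckpt \
#       --xlnet_config_file xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sts-b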
| 541
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 186
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
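# Minimal usage sketch (illustrative):
#
#   import torch
#   batch = torch.rand(2, 3, 64, 64) * 2 - 1  # fake image batch in [-1, 1]
#   pil_images = pt_to_pil(batch)             # -> list of two 64x64 PIL images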
| 220
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
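# Illustrative check of the derived channel dimension:
#
#   config = DinatConfig()                    # embed_dim=64, four stages
#   assert config.hidden_size == 64 * 2**3    # 512 channels after the last stage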
| 51
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 531
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the DeepFloyd IF pipeline."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 486
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels
    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on a sample audio file
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 486
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high):
    """Divide-and-conquer maximum subarray: returns (start, end, sum)."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr, low, mid, high):
    """Best subarray that crosses the midpoint: returns (start, end, sum)."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size):
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes():
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
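# Worked example (illustrative): for arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4],
# max_subarray(arr, 0, len(arr) - 1) returns (3, 6, 6): the slice arr[3:7]
# == [4, -1, 2, 1] has the maximum sum, 6. The divide-and-conquer recurrence
# T(n) = 2T(n/2) + O(n) gives O(n log n), versus O(n) for Kadane's algorithm.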
| 531
|
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def _UpperCamelCase (self : Tuple ) -> str:
"""simple docstring"""
A__ = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=a , return_tensors='pt' )
A__ = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A__ = model(a )
| 531
| 1
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''vocab.txt'''}
__A ={
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__A ={
'''facebook/esm2_t6_8M_UR50D''': 1_0_2_4,
'''facebook/esm2_t12_35M_UR50D''': 1_0_2_4,
}
def load_vocab_file( vocab_file ):
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase="<unk>" , lowercase="<cls>" , lowercase="<pad>" , lowercase="<mask>" , lowercase="<eos>" , **lowercase , ) -> List[str]:
super().__init__(**lowercase )
lowerCamelCase_ = load_vocab_file(lowercase )
lowerCamelCase_ = dict(enumerate(self.all_tokens ) )
lowerCamelCase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCamelCase_ = unk_token
lowerCamelCase_ = cls_token
lowerCamelCase_ = pad_token
lowerCamelCase_ = mask_token
lowerCamelCase_ = eos_token
lowerCamelCase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> str:
return self._id_to_token.get(lowercase , self.unk_token )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
return self._token_to_id.get(lowercase , self._token_to_id.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , **lowercase ) -> int:
return text.split()
def SCREAMING_SNAKE_CASE_( self , lowercase=False ) -> List[Any]:
return len(self._id_to_token )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return {token: i for i, token in enumerate(self.all_tokens )}
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
return self._token_to_id.get(lowercase , self._token_to_id.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> str:
return self._id_to_token.get(lowercase , self.unk_token )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
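    # Hedged illustration of the layout built above: a single sequence becomes
    # [<cls>] + token_ids + [<eos>], so ids [5, 6] with cls_id 0 and eos_id 2
    # (the ids here are made up) come back as [0, 5, 6, 2]; a second sequence
    # is appended afterwards with its own trailing <eos>.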
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCamelCase_ = [1] + ([0] * len(lowercase )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowercase ) + [1]
return mask
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Any:
lowerCamelCase_ = os.path.join(lowercase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(lowercase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def SCREAMING_SNAKE_CASE_( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = False ) -> int:
return super()._add_tokens(lowercase , special_tokens=lowercase )
| 313
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
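# Hedged usage sketch (the graph below is illustrative): for the weighted
# triangle 0-1 (weight 1), 1-2 (weight 1), 0-2 (weight 3), i.e.
#     adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
# the function should return the MST as (parent, vertex) pairs, e.g. [(0, 1), (1, 2)].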
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 313
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@staticmethod
@abstractmethod
def UpperCamelCase_ ( __lowercase : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
raise NotImplementedError()
| 225
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCamelCase__ = False
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] , __lowercase : Any=32 ):
'''simple docstring'''
set_seed(0 )
__a = UNetaDModel(sample_size=__lowercase , in_channels=3 , out_channels=3 )
__a = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
__a = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=__lowercase , )
__a = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=__lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
__a = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(__lowercase ) for _ in range(4 )]
__a = [torch.randn((4, 3, 32, 32) ).to(__lowercase ) for _ in range(4 )]
__a = [torch.randint(0 , 1000 , (4,) ).long().to(__lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
__a , __a = self.get_model_optimizer(resolution=32 )
model.train().to(__lowercase )
for i in range(4 ):
optimizer.zero_grad()
__a = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__a = model(__lowercase , timesteps[i] ).sample
__a = torch.nn.functional.mse_loss(__lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
__a , __a = self.get_model_optimizer(resolution=32 )
model.train().to(__lowercase )
for i in range(4 ):
optimizer.zero_grad()
__a = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__a = model(__lowercase , timesteps[i] ).sample
__a = torch.nn.functional.mse_loss(__lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
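# Background sketch (added for clarity): both schedulers noise a clean sample
# with the same closed form, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# which is why identical seeds, batches and timesteps must produce identical
# training losses under DDPM and DDIM.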
| 225
| 1
|
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 2_00:
        msg = (
            'Request to slack returned an error '
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 449
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
A = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
A = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def UpperCAmelCase ( UpperCAmelCase__ : Any):
lowerCamelCase : Any = list(state_dict.keys())
for name in state_dict_keys:
lowerCamelCase : List[str] = state_dict.pop(UpperCAmelCase__)
# emb -> embedding
if name.startswith('emb.'):
lowerCamelCase : Dict = name.replace('emb.' , 'embeddings.')
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0'):
lowerCamelCase : str = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln')
# att -> attention
lowerCamelCase : str = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , UpperCAmelCase__)
# ffn -> feed_forward
lowerCamelCase : List[Any] = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , UpperCAmelCase__)
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k'):
lowerCamelCase : Any = name.replace('.time_mix_k' , '.time_mix_key')
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v'):
lowerCamelCase : str = name.replace('.time_mix_v' , '.time_mix_value')
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r'):
lowerCamelCase : List[Any] = name.replace('.time_mix_r' , '.time_mix_receptance')
if name != "head.weight":
lowerCamelCase : Any = 'rwkv.' + name
lowerCamelCase : Any = weight
return state_dict
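# Hedged examples of the mapping above (the key names are illustrative):
#     "emb.weight"              -> "rwkv.embeddings.weight"
#     "blocks.0.ln0.weight"     -> "rwkv.blocks.0.pre_ln.weight"
#     "blocks.3.att.time_mix_k" -> "rwkv.blocks.3.attention.time_mix_key"
#     "head.weight"             -> "head.weight" (left unprefixed)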
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Dict=None):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.')
lowerCamelCase : Dict = 5_02_77
lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
else:
lowerCamelCase : int = PreTrainedTokenizerFast(tokenizer_file=UpperCAmelCase__)
lowerCamelCase : List[Any] = len(UpperCAmelCase__)
tokenizer.save_pretrained(UpperCAmelCase__)
# 2. Build the config
lowerCamelCase : Tuple = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
lowerCamelCase : Union[str, Any] = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''')
lowerCamelCase : List[Any] = RwkvConfig(
vocab_size=UpperCAmelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(UpperCAmelCase__)
# 3. Download model file then convert state_dict
lowerCamelCase : Tuple = hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__)
lowerCamelCase : Any = torch.load(UpperCAmelCase__ , map_location='cpu')
lowerCamelCase : str = convert_state_dict(UpperCAmelCase__)
# 4. Split in shards and save
lowerCamelCase , lowerCamelCase : Optional[int] = shard_checkpoint(UpperCAmelCase__)
for shard_file, shard in shards.items():
torch.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , UpperCAmelCase__))
if index is not None:
lowerCamelCase : Dict = os.path.join(UpperCAmelCase__ , UpperCAmelCase__)
# Save the index as well
with open(UpperCAmelCase__ , 'w' , encoding='utf-8') as f:
lowerCamelCase : int = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__) + '\n'
f.write(UpperCAmelCase__)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error; if this is the case, don\'t worry, you still have converted the model.')
lowerCamelCase : List[str] = list(shards.keys())
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
lowerCamelCase : Dict = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__))
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCAmelCase__ , UpperCAmelCase__))
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.')
lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained(UpperCAmelCase__)
model.push_to_hub(UpperCAmelCase__ , max_shard_size='2GB')
tokenizer.push_to_hub(UpperCAmelCase__)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
A = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 449
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_SCREAMING_SNAKE_CASE : str = True
from torch.cuda.amp import autocast
_SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ :
a__ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__ : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__ : Optional[bool] = field(
default=__lowerCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
a__ : Optional[bool] = field(
default=__lowerCamelCase , metadata={'help': 'Whether to log verbose messages or not.'} , )
a__ : Optional[float] = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
a__ : Optional[float] = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
a__ : Optional[float] = field(
default=0.99_9995 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def _lowercase ( __lowerCamelCase : ModelArguments ,__lowerCamelCase : TrainingArguments ) -> int:
'''simple docstring'''
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
UpperCamelCase__ : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
UpperCamelCase__ : Tuple = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
UpperCamelCase__ : Optional[int] = logging.INFO
logger.setLevel(__lowerCamelCase )
@dataclass
class UpperCamelCase__ :
a__ : str = field(
default=__lowerCamelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__ : Optional[str] = field(
default=__lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
a__ : Optional[str] = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
a__ : Optional[str] = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
a__ : Optional[str] = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
a__ : bool = field(
default=__lowerCamelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__ : Optional[int] = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
a__ : Optional[int] = field(
default=__lowerCamelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
a__ : Optional[float] = field(
default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class UpperCamelCase__ :
a__ : WavaVecaForPreTraining
a__ : WavaVecaFeatureExtractor
a__ : Union[bool, str] = "longest"
a__ : Optional[int] = None
a__ : Optional[int] = None
def __call__( self : Optional[Any], __lowerCamelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
UpperCamelCase__ : Union[str, Any] = self.feature_extractor.pad(
__lowerCamelCase, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''', )
UpperCamelCase__ : int = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
UpperCamelCase__ : Union[str, Any] = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
UpperCamelCase__ : Tuple = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
UpperCamelCase__ : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['''input_values'''].device )
            # these two operations make sure that all values
# before the output lengths indices are attended to
UpperCamelCase__ : Tuple = 1
UpperCamelCase__ : Union[str, Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
UpperCamelCase__ : Any = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__lowerCamelCase, min_masks=2, )
return batch
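# Hedged note on the collator above: _compute_mask_indices samples spans of
# mask_time_length consecutive frames so that roughly mask_time_prob of each
# example's frames end up masked (never fewer than min_masks=2 spans); the
# contrastive pretraining loss is computed only at these masked positions.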
class UpperCamelCase__ ( __lowerCamelCase ):
def __init__( self : List[Any], *__lowerCamelCase : int, __lowerCamelCase : Union[str, Any]=1, __lowerCamelCase : str=0, __lowerCamelCase : List[Any]=1.0, **__lowerCamelCase : Optional[int] ) -> List[Any]:
super().__init__(*__lowerCamelCase, **__lowerCamelCase )
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : List[Any] = max_gumbel_temp
UpperCamelCase__ : Any = min_gumbel_temp
UpperCamelCase__ : Union[str, Any] = gumbel_temp_decay
def __lowercase( self : Any, __lowerCamelCase : nn.Module, __lowerCamelCase : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
UpperCamelCase__ : List[str] = self._prepare_inputs(__lowerCamelCase )
if self.use_amp:
with autocast():
UpperCamelCase__ : Optional[int] = self.compute_loss(__lowerCamelCase, __lowerCamelCase )
else:
UpperCamelCase__ : Union[str, Any] = self.compute_loss(__lowerCamelCase, __lowerCamelCase )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
UpperCamelCase__ : Dict = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
UpperCamelCase__ : Tuple = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
UpperCamelCase__ : Optional[Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__lowerCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(__lowerCamelCase, self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__lowerCamelCase )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
return loss.detach()
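# A minimal sketch of the temperature schedule applied above (the constants
# mirror this script's defaults; the helper name is illustrative):
def _example_gumbel_temperature(num_update_step: int, max_gumbel_temp: float = 2.0,
                                min_gumbel_temp: float = 0.5,
                                gumbel_temp_decay: float = 0.999995) -> float:
    # Exponential decay from the maximum temperature, floored at the minimum.
    return max(max_gumbel_temp * gumbel_temp_decay**num_update_step, min_gumbel_temp)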
def _lowercase ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[Any] = parser.parse_args_into_dataclasses()
configure_logger(__lowerCamelCase ,__lowerCamelCase )
# Downloading and loading a dataset from the hub.
UpperCamelCase__ : int = load_dataset(data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
UpperCamelCase__ : List[str] = DatasetDict()
UpperCamelCase__ : List[Any] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' ,cache_dir=model_args.cache_dir ,)
UpperCamelCase__ : Optional[int] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' ,cache_dir=model_args.cache_dir ,)
else:
        # make sure only "validation" and "train" keys remain
UpperCamelCase__ : int = DatasetDict()
UpperCamelCase__ : List[Any] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split='''validation''' ,cache_dir=model_args.cache_dir ,)
UpperCamelCase__ : int = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=F'{data_args.train_split_name}' ,cache_dir=model_args.cache_dir ,)
# only normalized-inputs-training is supported
UpperCamelCase__ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,do_normalize=__lowerCamelCase )
def prepare_dataset(__lowerCamelCase : Tuple ):
# check that all files have the correct sampling rate
UpperCamelCase__ ,UpperCamelCase__ : int = librosa.load(batch[data_args.speech_file_column] ,sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
UpperCamelCase__ : List[str] = datasets.map(
__lowerCamelCase ,num_proc=data_args.preprocessing_num_workers ,remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
UpperCamelCase__ : str = vectorized_datasets.filter(
lambda __lowerCamelCase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__lowerCamelCase : str ):
return feature_extractor(batch['''speech'''] ,sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
UpperCamelCase__ : List[Any] = vectorized_datasets.map(
__lowerCamelCase ,batched=__lowerCamelCase ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,remove_columns=vectorized_datasets['''train'''].column_names ,)
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
UpperCamelCase__ : Any = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,gradient_checkpointing=training_args.gradient_checkpointing ,)
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
UpperCamelCase__ : List[Any] = WavaVecaForPreTraining(__lowerCamelCase )
UpperCamelCase__ : Optional[Any] = DataCollatorForWavaVecaPretraining(model=__lowerCamelCase ,feature_extractor=__lowerCamelCase )
UpperCamelCase__ : Any = WavaVecaPreTrainer(
model=__lowerCamelCase ,data_collator=__lowerCamelCase ,args=__lowerCamelCase ,train_dataset=vectorized_datasets['''train'''] ,eval_dataset=vectorized_datasets['''validation'''] ,tokenizer=__lowerCamelCase ,max_gumbel_temp=model_args.max_gumbel_temperature ,min_gumbel_temp=model_args.min_gumbel_temperature ,gumbel_temp_decay=model_args.gumbel_temperature_decay ,)
trainer.train()
if __name__ == "__main__":
main()
| 344
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return ''.join(list1)
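# Hedged examples of the merge rule above:
#     compare_string('0010', '0110') -> '0_10'  (exactly one bit differs, so it is blanked)
#     compare_string('0000', '1111') -> False   (more than one bit differs)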
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 344
| 1
|
class CircularQueue:
    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__(self):
        return self.size
    def is_empty(self):
        return self.size == 0
    def first(self):
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
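# Hedged usage sketch (capacity and values are illustrative):
#     q = CircularQueue(3)
#     q.enqueue(1).enqueue(2)  # enqueue returns self, so calls can chain
#     q.first()                # -> 1 (peek; the queue is unchanged)
#     q.dequeue()              # -> 1; front advances to (front + 1) % n
#     len(q)                   # -> 1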
| 311
|
def is_sum_subset(arr, required_sum) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero (0) can be formed by not taking any
    # element, hence True
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and the set is empty, then False
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
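# Hedged examples (the sample array is illustrative):
#     is_sum_subset([3, 34, 4, 12, 5, 2], 9)  -> True, since {4, 5} sums to 9
#     is_sum_subset([3, 34, 4, 12, 5, 2], 30) -> False, as no subset reaches 30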
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 1
|
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index)[2:]
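# Hedged note on the helper above: it mirrors LZW's growing code width. When
# the next index is an exact power of two, every existing code gains a leading
# '0' so all codes keep the same bit length as longer codes are introduced.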
def compress_data(data_bits: str) -> str:
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
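# Hedged example of the header built above: for an 8-byte source file,
# bin(8)[2:] == '1000', so the stream begins with '000' + '1000' (the length's
# bit-length minus one as leading zeros, then the length itself), followed by
# the compressed payload.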
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 40
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase :
_A : Union[str, Any] = MBartConfig
_A : Tuple = {}
_A : Tuple = """gelu"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=20 , __A=2 , __A=1 , __A=0 , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = pad_token_id
__UpperCAmelCase = bos_token_id
def __lowerCamelCase ( self ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase = prepare_mbart_inputs_dict(__A , __A , __A )
return config, inputs_dict
def __lowerCamelCase ( self , __A , __A ):
__UpperCAmelCase = TFMBartModel(config=__A ).get_decoder()
__UpperCAmelCase = inputs_dict['input_ids']
__UpperCAmelCase = input_ids[:1, :]
__UpperCAmelCase = inputs_dict['attention_mask'][:1, :]
__UpperCAmelCase = inputs_dict['head_mask']
__UpperCAmelCase = 1
# first forward pass
__UpperCAmelCase = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
__UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple()
__UpperCAmelCase = past_key_values[1]
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , )-> Tuple:
if attention_mask is None:
__UpperCAmelCase = tf.cast(tf.math.not_equal(_lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
_A : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
_A : List[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
_A : str = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A : List[str] = True
_A : Optional[int] = False
_A : Optional[Any] = False
def __lowerCamelCase ( self , __A , __A , __A , __A , __A ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __lowerCamelCase ( self ):
__UpperCAmelCase = TFMBartModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__A )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
_A : List[str] = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
_A : Optional[Any] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
_A : Optional[int] = """facebook/mbart-large-en-ro"""
@cached_property
def __lowerCamelCase ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCamelCase ( self ):
__UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCamelCase ( self , **__A ):
__UpperCAmelCase = self.translate_src_text(**__A )
self.assertListEqual(self.expected_text , __A )
def __lowerCamelCase ( self , **__A ):
__UpperCAmelCase = self.tokenizer(self.src_text , **__A , return_tensors='tf' )
__UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__UpperCAmelCase = self.tokenizer.batch_decode(__A , skip_special_tokens=__A )
return generated_words
@slow
def __lowerCamelCase ( self ):
self._assert_generated_batch_equal_expected()
| 126
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ =logging.get_logger(__name__)
lowerCAmelCase__ ={
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class A__( __magic_name__ ):
lowerCAmelCase = '''van'''
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=2_24 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE : Optional[int]=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE : str=[64, 1_28, 3_20, 5_12] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 3, 12, 3] , __SCREAMING_SNAKE_CASE : Dict=[8, 8, 4, 4] , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-6 , __SCREAMING_SNAKE_CASE : Any=1E-2 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , **__SCREAMING_SNAKE_CASE : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = patch_sizes
__SCREAMING_SNAKE_CASE = strides
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = mlp_ratios
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = layer_scale_init_value
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = dropout_rate
| 690
|
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ =logging.get_logger(__name__)
lowerCAmelCase__ ={"vocab_file": "spiece.model"}
lowerCAmelCase__ ={
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
lowerCAmelCase__ ={
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class A__( __magic_name__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
__SCREAMING_SNAKE_CASE = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;'''
                ''' if you are testing the model, this can safely be ignored''' )
__SCREAMING_SNAKE_CASE = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__SCREAMING_SNAKE_CASE = '''<|endoftext|>''' if eos_token is None else eos_token
__SCREAMING_SNAKE_CASE = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__SCREAMING_SNAKE_CASE = unk_token if pad_token is None else pad_token
__SCREAMING_SNAKE_CASE = eos_token if bos_token is None else bos_token
else:
__SCREAMING_SNAKE_CASE = '''<pad>''' if pad_token is None else pad_token
__SCREAMING_SNAKE_CASE = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = remove_space
__SCREAMING_SNAKE_CASE = keep_accents
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
        # fmt: off
__SCREAMING_SNAKE_CASE = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__SCREAMING_SNAKE_CASE = re.compile(
f"""[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]""" )
def __getstate__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.__dict__.copy()
__SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
return len(self.sp_model )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.non_printing_characters_re.sub('''''' , __SCREAMING_SNAKE_CASE )
# Normalize whitespaces
__SCREAMING_SNAKE_CASE = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
__SCREAMING_SNAKE_CASE = unicodedata.normalize('''NFC''' , __SCREAMING_SNAKE_CASE )
return text
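    # Hedged example of the normalization above: text containing a non-breaking
    # space such as 'hej\u00a0du' should come back as 'hej du', with non-printing
    # control characters stripped and accents NFC-composed.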
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE )
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : str ) -> str:
"""simple docstring"""
return out_string
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = ''''''
__SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string
def _a ( self : Union[str, Any] ) -> Dict[str, int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
__SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = [self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text]
__SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
__SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE )
return token_ids
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str:
"""simple docstring"""
return self.sp_model.decode(__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
__SCREAMING_SNAKE_CASE = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__SCREAMING_SNAKE_CASE ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=__SCREAMING_SNAKE_CASE )
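    # Hedged example of the prompt assembled above (the turn texts are made up):
    # a conversation ["User: Hej", "Bot: Hej!"] is encoded from the string
    #     "<|endoftext|><s>User: Hej<s>Bot: Hej!<s>Bot:"
    # i.e. eos, then bos-separated turns, ending with a "Bot:" cue for generation.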
| 690
| 1
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =VQModel
_lowerCamelCase ="sample"
@property
def __snake_case ( self : List[str] , a__ : Optional[int]=(32, 32) ):
UpperCAmelCase = 4
UpperCAmelCase = 3
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
return {"sample": image}
@property
def __snake_case ( self : Any ):
return (3, 32, 32)
@property
def __snake_case ( self : Dict ):
return (3, 32, 32)
def __snake_case ( self : Dict ):
UpperCAmelCase = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : List[Any] ):
pass
    def test_from_pretrained_hub( self ):
        model , loading_info = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self ):
        model = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1e-3 ) )
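# Note added for illustration: enable_full_determinism() above seeds all RNGs
# and forces deterministic kernels, which is what makes the hard-coded
# expected_output_slice in test_output_pretrained reproducible across runs.
# Minimal sketch of a direct invocation of this module's tests:
if __name__ == "__main__":
    unittest.main()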
| 51
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(n: int = 2_0_0_0_0_0_0 ) -> int:
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
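# A small illustrative check (added; not part of the original snippet): every
# prime above 3 is congruent to 1 or 5 modulo 6, which is why is_prime() only
# trial divides by i and i + 2 for i = 5, 11, 17, ...
assert all(p % 6 in (1, 5) for p in [5, 7, 11, 13, 101, 1_009] )
assert [is_prime(n ) for n in range(2 , 10 )] == [True, True, False, True, False, True, False, False]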
| 502
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """wavlm"""
    def __init__( self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(1_0, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , num_buckets=3_2_0 , max_bucket_distance=8_0_0 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , num_codevectors_per_group=3_2_0 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_0_0 , codevector_dim=2_5_6 , proj_codevector_dim=2_5_6 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_1_2 , num_ctc_classes=8_0 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ) -> None:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
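if __name__ == "__main__":
    # A minimal sanity sketch (added for illustration; not part of the original
    # module): the feature-encoder depth is derived from conv_dim, and
    # inputs_to_logits_ratio is the product of the conv strides
    # (5 * 2 * 2 * 2 * 2 * 2 * 2 = 320).
    demo_config = WavLMConfig()
    assert demo_config.num_feat_extract_layers == 7
    assert demo_config.inputs_to_logits_ratio == 320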
| 417
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 2_5_5 , do_center_crop = True , crop_size = None , do_flip_channel_order = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 2_2_4}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize( self , image , size , resample = PIL.Image.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order( self , image , data_format = None ) -> np.ndarray:
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_center_crop = None , crop_size = None , do_flip_channel_order = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
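if __name__ == "__main__":
    # Illustrative usage (an addition, not part of the original module; assumes
    # Pillow is installed per the guarded imports above): the default pipeline
    # resizes so the shortest edge is 224, center crops/pads to 256x256,
    # rescales by 1/255 and flips RGB -> BGR, returning channels-first arrays.
    demo_processor = MobileViTImageProcessor()
    demo_batch = demo_processor.preprocess(np.random.randint(0 , 256 , (512, 512, 3) ).astype(np.uint8 ) )
    print(demo_batch['''pixel_values'''][0].shape )  # expected (3, 256, 256)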
| 417
| 1
|
import math
import qiskit
def quantum_full_adder( input_a : int = 1 , input_b : int = 1 , carry_in : int = 1 ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=10_00 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 439
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , big_bird_config_file : str , pytorch_dump_path : str , is_trivia_qa : bool ) -> None:
    '''simple docstring'''
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
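# Example invocation (hypothetical script name and paths, shown for
# illustration only; the --is_trivia_qa flag selects the QA head):
#   python convert_big_bird_checkpoint.py \
#       --tf_checkpoint_path  /tmp/bigbird/model.ckpt \
#       --big_bird_config_file /tmp/bigbird/config.json \
#       --pytorch_dump_path   /tmp/bigbird/pytorch_dump \
#       --is_trivia_qa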
| 439
| 1
|
class RadixNode:
    def __init__( self , prefix : str = "" , is_leaf : bool = False ) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes : dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word : str ) -> tuple[str, str, str]:
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words : list[str] ) -> None:
        for word in words:
            self.insert(word )
    def insert( self , word : str ) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word : str ) -> bool:
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word : str ) -> bool:
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height : int = 0 ) -> None:
        if self.prefix != "":
            print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie() -> bool:
    '''simple docstring'''
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('bandanas' )
    assert not root.find('apps' )
    root.delete('all' )
    assert not root.find('all' )
    root.delete('banana' )
    assert not root.find('banana' )
    assert root.find('bananas' )
    return True
def pytests() -> None:
    '''simple docstring'''
    assert test_trie()
def main() -> None:
    '''simple docstring'''
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words )
    print('Words:' , words )
    print('Tree:' )
    root.print_tree()
if __name__ == "__main__":
    main()
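# A worked trace of the four insert cases (added for illustration; it uses
# only the RadixNode API defined above).
def _insert_cases_demo() -> None:
    root = RadixNode()
    root.insert('bandana' )   # case 2: no edge starting with 'b' exists yet
    root.insert('ban' )       # case 4: splits 'bandana' into 'ban' + 'dana'
    root.insert('bandanas' )  # case 3: 'ban' and 'dana' match fully; adds 's'
    root.insert('' )          # case 1 (degenerate): the root's empty prefix equals the word
    assert root.find('ban' ) and root.find('bandana' ) and root.find('bandanas' )
if __name__ == "__main__":
    _insert_cases_demo()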
| 224
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == F'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('numpy.random.Generator' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
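# Note added for illustration: these are plain pytest-style test functions.
# A hypothetical direct invocation (assumes pytest plus a local Java/Spark
# runtime are available):
#   pytest path/to/this_module.py -x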
| 224
| 1
|
'''simple docstring'''
def sum_of_series( first_term , common_diff , num_of_terms ) -> float:
    '''simple docstring'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
    '''simple docstring'''
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
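# Worked check (added for illustration): with first_term=1, common_diff=1 and
# num_of_terms=10 the series is 1 + 2 + ... + 10, and the closed form gives
# (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.
assert sum_of_series(1 , 1 , 10 ) == 55.0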
| 109
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase__ = TypeVar("T")
def _A( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return (position - 1) // 2
def _A( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return (2 * position) + 1
def _A( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return (2 * position) + 2
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = []
__lowercase = {}
__lowercase = 0
def __len__( self : str ) -> int:
"""simple docstring"""
return self.elements
def __repr__( self : int ) -> str:
"""simple docstring"""
return str(self.heap )
def UpperCAmelCase_ ( self : List[Any] ) -> bool:
"""simple docstring"""
return self.elements == 0
def UpperCAmelCase_ ( self : Dict , lowerCamelCase__ : T , lowerCamelCase__ : int ) -> None:
"""simple docstring"""
self.heap.append((elem, weight) )
__lowercase = self.elements
self.elements += 1
self._bubble_up(lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__lowercase , __lowercase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowercase , __lowercase = self.heap[0]
self._bubble_down(lowerCamelCase__ )
return elem
def UpperCAmelCase_ ( self : Union[str, Any] , lowerCamelCase__ : T , lowerCamelCase__ : int ) -> None:
"""simple docstring"""
__lowercase = self.position_map[elem]
__lowercase = (elem, weight)
if position > 0:
__lowercase = get_parent_position(lowerCamelCase__ )
__lowercase , __lowercase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowerCamelCase__ )
else:
self._bubble_down(lowerCamelCase__ )
else:
self._bubble_down(lowerCamelCase__ )
def UpperCAmelCase_ ( self : str , lowerCamelCase__ : T ) -> None:
"""simple docstring"""
__lowercase = self.position_map[elem]
if curr_pos == 0:
return None
__lowercase = get_parent_position(lowerCamelCase__ )
__lowercase , __lowercase = self.heap[curr_pos]
__lowercase , __lowercase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ )
return self._bubble_up(lowerCamelCase__ )
return None
def UpperCAmelCase_ ( self : Optional[Any] , lowerCamelCase__ : T ) -> None:
"""simple docstring"""
__lowercase = self.position_map[elem]
__lowercase , __lowercase = self.heap[curr_pos]
__lowercase = get_child_left_position(lowerCamelCase__ )
__lowercase = get_child_right_position(lowerCamelCase__ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowercase , __lowercase = self.heap[child_left_position]
__lowercase , __lowercase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ )
return self._bubble_down(lowerCamelCase__ )
if child_left_position < self.elements:
__lowercase , __lowercase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ )
return self._bubble_down(lowerCamelCase__ )
else:
return None
if child_right_position < self.elements:
__lowercase , __lowercase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowerCamelCase__ , lowerCamelCase__ )
return self._bubble_down(lowerCamelCase__ )
return None
def UpperCAmelCase_ ( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> None:
"""simple docstring"""
__lowercase = self.heap[nodea_pos][0]
__lowercase = self.heap[nodea_pos][0]
__lowercase , __lowercase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__lowercase = nodea_pos
__lowercase = nodea_pos
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self : Tuple ) -> None:
"""simple docstring"""
__lowercase = {}
__lowercase = 0
def __repr__( self : str ) -> str:
"""simple docstring"""
return str(self.connections )
def __len__( self : str ) -> int:
"""simple docstring"""
return self.nodes
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : T ) -> None:
"""simple docstring"""
if node not in self.connections:
__lowercase = {}
self.nodes += 1
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : T , lowerCamelCase__ : T , lowerCamelCase__ : int ) -> None:
"""simple docstring"""
self.add_node(lowerCamelCase__ )
self.add_node(lowerCamelCase__ )
__lowercase = weight
__lowercase = weight
def _A( UpperCamelCase__ : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
__lowercase = {node: maxsize for node in graph.connections}
__lowercase = {node: None for node in graph.connections}
__lowercase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(UpperCamelCase__ , UpperCamelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowercase = priority_queue.extract_min()
__lowercase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowercase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCamelCase__ , dist[neighbour] )
__lowercase = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowercase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowercase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCamelCase__ , dist[neighbour] )
__lowercase = node
return dist, parent
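if __name__ == "__main__":
    # A small illustrative run (added; not part of the original module): on a
    # weighted triangle, Prim's algorithm keeps the two cheapest edges.
    demo_graph = GraphUndirectedWeighted()
    demo_graph.add_edge('a' , 'b' , 1 )
    demo_graph.add_edge('b' , 'c' , 2 )
    demo_graph.add_edge('a' , 'c' , 5 )
    dist , parent = prims_algo(demo_graph )
    assert dist == {'a': 0, 'b': 1, 'c': 3}
    assert parent == {'a': None, 'b': 'a', 'c': 'b'}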
| 332
| 0
|
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _a :
__a : Any = PegasusConfig
__a : Optional[int] = {}
__a : Tuple = """gelu"""
def __init__( self : Optional[Any] , lowercase : int , lowercase : List[str]=13 , lowercase : int=7 , lowercase : Tuple=True , lowercase : List[Any]=False , lowercase : Optional[Any]=99 , lowercase : Any=32 , lowercase : int=5 , lowercase : List[str]=4 , lowercase : int=37 , lowercase : List[str]=0.1 , lowercase : Optional[int]=0.1 , lowercase : List[Any]=20 , lowercase : str=2 , lowercase : Optional[int]=1 , lowercase : List[Any]=0 , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def A ( self : Optional[int] , lowercase : Dict , lowercase : Any , lowercase : Any ):
'''simple docstring'''
UpperCAmelCase = 20
UpperCAmelCase = model_class_name(lowercase )
UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase , UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
UpperCAmelCase = model.decode(lowercase , lowercase )
UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def A ( self : Optional[Any] , lowercase : str , lowercase : Dict , lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase = 20
UpperCAmelCase = model_class_name(lowercase )
UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase , UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
UpperCAmelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _a ( __a , unittest.TestCase ):
__a : Tuple = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__a : Union[str, Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__a : Dict = True
__a : Any = False
__a : Optional[Any] = False
__a : Tuple = False
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = FlaxPegasusModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase )
def A ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowercase , lowercase )
UpperCAmelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase : Optional[int] , lowercase : Dict=None , **lowercase : List[str] ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = model_class(lowercase )
UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase : List[str] , lowercase : Union[str, Any] , lowercase : List[str] ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A ( self : int ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=lowercase )
UpperCAmelCase = np.ones((1, 1) )
UpperCAmelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
UpperCAmelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
UpperCAmelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
UpperCAmelCase = tokenizer(lowercase , return_tensors='''np''' , truncation=lowercase , max_length=512 , padding=lowercase )
UpperCAmelCase = model.generate(**lowercase , num_beams=2 ).sequences
UpperCAmelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
| 358
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig( PretrainedConfig ):
    model_type = """mgp-str"""
    def __init__( self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50_257 , num_wordpiece_labels=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.02 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
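if __name__ == "__main__":
    # Quick illustrative check (added; not part of the original module): the
    # defaults above describe the base 32x128 recognition layout with a
    # 27-token output sequence.
    demo_config = MgpstrConfig()
    assert demo_config.image_size == [32, 128]
    assert demo_config.max_token_length == 27
    assert demo_config.hidden_size == 768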
| 358
| 1
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any]=13 , UpperCamelCase : Any=7 , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[str]=False , UpperCamelCase : Tuple=True , UpperCamelCase : List[Any]=99 , UpperCamelCase : Any=32 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : List[str]=64 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : Any=0.1 , UpperCamelCase : str=0.1 , UpperCamelCase : Tuple=5_12 , UpperCamelCase : List[str]=16 , UpperCamelCase : int=2 , UpperCamelCase : str=0.02 , UpperCamelCase : Optional[int]=3 , UpperCamelCase : Dict=4 , UpperCamelCase : List[Any]=None , UpperCamelCase : Tuple=2 , UpperCamelCase : Any=2 , UpperCamelCase : Any=2 , UpperCamelCase : Dict=2 , UpperCamelCase : List[str]=4 , UpperCamelCase : List[Any]=1 , ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : List[Any] = batch_size
_snake_case : Dict = seq_length
_snake_case : Any = is_training
_snake_case : Tuple = use_input_mask
_snake_case : str = use_token_type_ids
_snake_case : Optional[Any] = use_labels
_snake_case : Union[str, Any] = vocab_size
_snake_case : Any = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Any = hidden_act
_snake_case : int = hidden_dropout_prob
_snake_case : int = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : List[str] = type_vocab_size
_snake_case : List[str] = type_sequence_label_size
_snake_case : List[Any] = initializer_range
_snake_case : Union[str, Any] = num_labels
_snake_case : Any = num_choices
_snake_case : Optional[Any] = scope
_snake_case : Dict = q_groups
_snake_case : str = k_groups
_snake_case : int = v_groups
_snake_case : List[Any] = post_attention_groups
_snake_case : int = intermediate_groups
_snake_case : List[str] = output_groups
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : str = None
if self.use_input_mask:
_snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : Optional[Any] = None
_snake_case : Optional[Any] = None
_snake_case : Dict = None
if self.use_labels:
_snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : int = SqueezeBertModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Any = model(UpperCamelCase , UpperCamelCase )
_snake_case : Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = SqueezeBertForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Optional[int] = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : int , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = SqueezeBertForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : List[Any] = model(
UpperCamelCase , attention_mask=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : Dict = SqueezeBertForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Tuple = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : int , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : List[str] = SqueezeBertForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Optional[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Dict = self.num_choices
_snake_case : List[Any] = SqueezeBertForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : Optional[int] = model(
UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) : List[Any] = config_and_inputs
_snake_case : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] =(
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
a_ : Optional[Any] =(
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Tuple =False
a_ : str =True
a_ : Union[str, Any] =False
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : int = SqueezeBertModelTester(self )
_snake_case : Tuple = ConfigTester(self , config_class=UpperCamelCase , dim=37 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCamelCase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCamelCase )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Any = SqueezeBertModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
_snake_case : Tuple = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
_snake_case : Optional[Any] = model(UpperCamelCase )[0]
_snake_case : int = torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCamelCase )
_snake_case : Union[str, Any] = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-4 ) )
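# Illustrative follow-up to the integration test above (not part of the test
# suite; inputs are the same hypothetical token ids): the (1, 3) logits from
# the MNLI checkpoint map to the three NLI classes, so a prediction can be
# read off with argmax.
#   model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
#   input_ids = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
#   predicted_class = model(input_ids )[0].argmax(-1 ).item()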
def sylvester(number: int) -> int:
    # Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1,
    # computed below as (a(n-1) - 1) * a(n-1) + 1.
    assert isinstance(number , int ), F"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = F"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config ( swin_name : str ) -> SwinConfig:
    """simple docstring"""
    config = SwinConfig()
    name_split = swin_name.split('_' )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
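# Example of the name parsing above (illustrative): for
# "swin_tiny_patch4_window7_224", name_split[1] -> model_size "tiny",
# name_split[3][-1] -> window_size 7, name_split[4] -> img_size 224, giving
# embed_dim 96, depths (2, 2, 6, 2), num_heads (3, 6, 12, 24), 1000 labels.
#   config = get_swin_config("swin_tiny_patch4_window7_224")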
def rename_key ( name : str ) -> str:
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'swin.' + name
    return name
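# Example of the key mapping above (illustrative): a timm key such as
#   "layers.0.blocks.0.attn.proj.weight"
# becomes
#   "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"
# ("layers" gains the "encoder." prefix, "attn.proj" maps to
# "attention.output.dense", and non-head keys gain the "swin." prefix).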
def convert_state_dict ( orig_state_dict , model ) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
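# Worked example of the qkv split above (illustrative shapes): timm fuses the
# query/key/value projections into one tensor of shape (3 * dim, dim). For
# dim = 96 the slices are
#   query  <- val[:96, :]
#   key    <- val[96:192, :]
#   value  <- val[-96:, :]
# and the fused bias vector of length 3 * dim is sliced the same way.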
def convert_swin_checkpoint ( swin_name , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )
    print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
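# Example invocation (hypothetical script name):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224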
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Optional[int] = batch_size
__snake_case : Optional[Any] = image_size
__snake_case : Optional[int] = patch_size
__snake_case : Optional[Any] = num_channels
__snake_case : Optional[Any] = is_training
__snake_case : Tuple = use_labels
__snake_case : Optional[int] = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Tuple = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Dict = type_sequence_label_size
__snake_case : str = initializer_range
__snake_case : int = encoder_stride
__snake_case : List[str] = num_attention_outputs
__snake_case : Optional[Any] = embed_dim
__snake_case : Optional[Any] = embed_dim + 1
__snake_case : List[str] = resolution
__snake_case : Optional[int] = depths
__snake_case : List[Any] = hidden_sizes
__snake_case : List[str] = dim
__snake_case : Union[str, Any] = mlp_expansion_ratio
    def prepare_config_and_inputs(self) -> List[Any]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self) -> EfficientFormerConfig:
        """simple docstring"""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model(self , config , pixel_values , labels) -> None:
        """simple docstring"""
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values , training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self , config , pixel_values , labels) -> None:
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values , labels=labels , training=False)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self) -> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_snake_case = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_snake_case = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=3_7)
def SCREAMING_SNAKE_CASE__ (self) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds')
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings')
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ (self) -> Any:
"""simple docstring"""
__snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(__a)
__snake_case : Union[str, Any] = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[int] = [*signature.parameters.keys()]
__snake_case : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a)
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(__a , __a , __a):
__snake_case : str = model_class(__a)
__snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(__a) , __a)
if hasattr(self.model_tester , 'encoder_seq_length'):
__snake_case : List[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
__snake_case : str = seq_length * self.model_tester.chunk_length
else:
__snake_case : Optional[int] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__snake_case : List[Any] = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states , (list, tuple))
self.assertEqual(len(__a) , __a)
__snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a)
__snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__a , __a , __a)
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False) -> dict:
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a)
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = TFEfficientFormerModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = True
__snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a)
__snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a)
__snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a)
__snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a)
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
__snake_case : str = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = True
__snake_case : Dict = False
__snake_case : Optional[int] = True
__snake_case : Dict = model_class(__a)
__snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Dict = True
__snake_case : str = model_class(__a)
__snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__snake_case : Tuple = model_class(__a)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__snake_case : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__snake_case : Tuple = model(__a)
self.assertTrue(outputs_dict is not None)
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
__snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
__snake_case : Optional[int] = self.default_image_processor
__snake_case : List[Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
# forward pass
__snake_case : List[str] = model(**__a , training=__a)
# verify the logits
__snake_case : str = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
@slow
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300')
__snake_case : List[Any] = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
# forward pass
__snake_case : Optional[int] = model(**__a , training=__a)
# verify the logits
__snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config (model_name ):
    '''simple docstring'''
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key (name ):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
def convert_focalnet_checkpoint (model_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    # fmt: off
    model_name_to_url = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'Pushing model and processor of {model_name} to the hub...' )
        model.push_to_hub(f'{model_name}' )
        processor.push_to_hub(f'{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
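# Example invocation (hypothetical script name):
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny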
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
    destination_vertex: int
    weight: int
class AdjacencyList :
    def __init__( self , size : int ) -> None:
        """simple docstring"""
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex : int ) -> Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex] )
    @property
    def size( self ) -> int:
        """simple docstring"""
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int ) -> None:
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex : int , finish_vertex : int ) -> int | None:
        """simple docstring"""
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
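# Usage sketch (illustrative): 0-weight edges are pushed to the FRONT of the
# deque and 1-weight edges to the BACK, so vertices leave the deque in
# nondecreasing distance order without needing a priority queue.
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)          # free edge
#   g.add_edge(1, 2, 1)
#   g.add_edge(0, 2, 1)
#   g.get_shortest_path(0, 2)    # -> 1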
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("""KEY""")
VAL = TypeVar("""VAL""")
@dataclass(frozen=True , slots=True )
class _Item ( Generic[KEY, VAL] ):
    """simple docstring"""
    key: KEY
    val: VAL
class _DeletedItem ( _Item ):
    """simple docstring"""
    def __init__( self ):
        super().__init__(None , None )
    def __bool__( self ):
        return False
_deleted = _DeletedItem()
class HashMap ( MutableMapping[KEY, VAL] ):
    """simple docstring"""
    def __init__( self , initial_block_size : int = 8 , capacity_factor : float = 0.75 ):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key : KEY ):
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind : int ):
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind : int , key : KEY , val : VAL ):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ):
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size : int ):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ):
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ):
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key : KEY ):
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key : KEY , val : VAL ):
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key : KEY , val : VAL ):
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ):
        return self._len
    def __iter__( self ):
        yield from (item.key for item in self._buckets if item)
    def __repr__( self ):
        val_string = " ,".join(
            f'''{item.key}: {item.val}''' for item in self._buckets if item )
        return f'''HashMap({val_string})'''
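# Usage sketch (illustrative): keys that collide modulo the table size are
# resolved by linear probing, and deletions leave a _deleted tombstone so
# later probes keep scanning past the freed slot.
#   hm = HashMap(initial_block_size=8)
#   hm[1] = "a"
#   hm[9] = "b"          # 9 % 8 == 1 -> probes on to the next slot
#   del hm[1]            # slot becomes a tombstone
#   assert hm[9] == "b" and len(hm) == 1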
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node :
    """simple docstring"""
    def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , parent : Node | None ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch :
    """simple docstring"""
    def __init__( self , start : tuple[int, int] , goal : tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search( self ) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent : Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path( self , node : Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch :
    """simple docstring"""
    def __init__( self , start : tuple[int, int] , goal : tuple[int, int] ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search( self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self , fwd_node : Node , bwd_node : Node ) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
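# Design note: advancing one layer from each end means the two frontiers meet
# after roughly d/2 steps each, so for branching factor b the explored set
# shrinks from O(b^d) to O(b^(d/2)) versus the unidirectional search timed
# below.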
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("""Unidirectional BFS computation time : """, bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("""Bidirectional BFS computation time : """, bd_bfs_time)
"""simple docstring"""
def __UpperCamelCase ( word ):
    return "".join(chr(ord(char ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
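# Illustrative call: lowercase ASCII letters shift down 32 code points, all
# other characters pass through unchanged.
#   __UpperCamelCase("hello World!")  # -> "HELLO WORLD!"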
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch ( input_ids , pad_token_id , attention_mask=None , ):
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
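# Worked example of the column trimming above (hypothetical values): a column
# is kept when ANY row holds a non-pad token there.
#   input_ids (pad_token_id = 0):       keep_column_mask:
#     [[5, 6, 0, 0],                      [True, True, False, False]
#      [7, 0, 0, 0]]
#   trim_batch(...) -> [[5, 6], [7, 0]]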
class Seq2SeqDataset ( Dataset ):
"""simple docstring"""
    def __init__(self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__(self ):
return len(self.src_lens )
    def __getitem__(self , index ):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
@staticmethod
    def get_char_lens(data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn(self , batch ):
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list ( summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info ( folder_path ):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json ( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json ( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info ( ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap ( f , x ):
    return list(map(f , x ) )
def pickle_save ( obj , path ):
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def normalize_answer ( text ):
    def remove_articles(text ):
        return re.sub(R"""\b(a|an|the)\b""" , """ """ , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def f1_score ( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
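# Worked example (hypothetical strings): with articles stripped by
# normalize_answer,
#   prediction "the cat sat"      -> tokens [cat, sat]
#   ground truth "a cat sat down" -> tokens [cat, sat, down]
#   num_same = 2, precision = 2/2 = 1.0, recall = 2/3,
#   f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8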
def exact_match_score ( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match ( output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model ( model_prefix ):
    return model_prefix.startswith("""rag""" )
def set_extra_model_params ( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']
    def __init__(self , fpn_feature_size = 256 , mask_feature_size = 256 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
                f'''Supported model types: {",".join(self.backbones_supported )}''' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type' ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'''Transformer Decoder {decoder_type} not supported, please use one of'''
                    f''' {",".join(self.decoders_supported )}''' )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs (cls , backbone_config , decoder_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict (self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''decoder_config'''] = self.decoder_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
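# Usage sketch (illustrative): the defaults reproduce the Swin-base backbone
# and DETR decoder fallbacks above; explicit sub-configs can also be supplied.
#   config = MaskFormerConfig()
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(
#       backbone_config=SwinConfig() , decoder_config=DetrConfig() )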
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
RESOURCE_FILES_NAMES = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ernie-m-base': 5_1_4,
'ernie-m-large': 5_1_4,
}
PRETRAINED_INIT_CONFIGURATION = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class ErnieMTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__(self , _a , _a=None , _a=False , _a="utf8" , _a="[UNK]" , _a="[SEP]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowercase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , vocab_file=_a , encoding=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
lowercase_ : int = do_lower_case
lowercase_ : str = sentencepiece_model_ckpt
lowercase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase_ : Union[str, Any] = self.load_vocab(filepath=_a )
else:
lowercase_ : Any = {self.sp_model.id_to_piece(_a ): id for id in range(self.sp_model.get_piece_size() )}
lowercase_ : Optional[int] = {v: k for k, v in self.vocab.items()}
def _lowerCamelCase (self , _a ) -> Any:
if text is None:
return None
lowercase_ : Dict = self.tokenize(_a )
lowercase_ ,lowercase_ : Dict = '', []
for i, ch in enumerate(_a ):
if ch in self.SP_CHAR_MAPPING:
lowercase_ : str = self.SP_CHAR_MAPPING.get(_a )
else:
lowercase_ : List[str] = unicodedata.normalize('NFKC' , _a )
if self.is_whitespace(_a ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_a ) )
lowercase_ ,lowercase_ ,lowercase_ : int = normalized_text, [], 0
if self.do_lower_case:
lowercase_ : Optional[Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase_ : Tuple = token[1:]
lowercase_ : List[str] = text[offset:].index(_a ) + offset
lowercase_ : Any = start + len(_a )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase_ : Tuple = end
return token_mapping
@property
def _lowerCamelCase (self ) -> List[Any]:
return len(self.vocab )
def _lowerCamelCase (self ) -> int:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__(self ) -> Optional[Any]:
lowercase_ : Dict = self.__dict__.copy()
lowercase_ : List[str] = None
return state
def __setstate__(self , _a ) -> Any:
lowercase_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase_ : Tuple = {}
lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowerCamelCase (self , _a ) -> str:
return "".join((self.SP_CHAR_MAPPING.get(_a , _a ) for c in text) )
def _lowerCamelCase (self , _a , _a=False , _a=64 , _a=0.1 ) -> Optional[int]:
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowercase_ : Optional[int] = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowercase_ : Union[str, Any] = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowercase_ : Optional[int] = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowercase_ : Optional[Any] = self.sp_model.EncodeAsPieces(_a )
else:
lowercase_ : List[Any] = self.sp_model.SampleEncodeAsPieces(_a , _a , _a )
lowercase_ : int = []
for pi, piece in enumerate(_a ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_a ) and pi != 0:
new_pieces.append(_a )
continue
else:
continue
lowercase_ : Optional[Any] = 0
for i, chunk in enumerate(_a ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_a ) or self.is_punct(_a ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_a )
lowercase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase_ : Optional[Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase_ : int = i
if len(_a ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowerCamelCase (self , _a ) -> Union[str, Any]:
lowercase_ : Optional[Any] = ''.join(_a ).replace(_a , ' ' ).strip()
return out_string
def _lowerCamelCase (self , _a ) -> Optional[int]:
lowercase_ : Optional[Any] = self.convert_ids_to_tokens(_a )
lowercase_ : Tuple = ''.join(_a ).replace(_a , ' ' ).strip()
return out_string
def _lowerCamelCase (self , _a ) -> Optional[int]:
return self.vocab.get(_a , self.vocab.get(self.unk_token ) )
def _lowerCamelCase (self , _a ) -> int:
return self.reverse_vocab.get(_a , self.unk_token )
def _lowerCamelCase (self , _a , _a=None ) -> Any:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : Tuple = [self.cls_token_id]
lowercase_ : int = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char) -> bool:
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char) -> bool:
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char) -> bool:
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char) -> bool:
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
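
# Illustrative layout check (placeholder ids, not the tokenizer's real vocab):
# for a sequence pair, build_inputs_with_special_tokens above produces
# [CLS] A [SEP] [SEP] B [SEP], and create_token_type_ids_from_sequences marks
# [CLS] plus segment A with type 0 and everything from the first [SEP] on with type 1.
cls_id, sep_id = 1, 2  # assumed placeholder ids
ids_a, ids_b = [11, 12], [21, 22, 23]
input_ids = [cls_id] + ids_a + [sep_id] + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)
assert len(input_ids) == len(token_type_ids) == 9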
| 438
| 1
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
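
# Example invocation (illustrative only; the data/model flags map onto the
# dataclass fields defined above, and the model name is just a plausible choice):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --max_seq_length 128 \
#       --output_dir /tmp/debug_xnli \
#       --overwrite_output_dir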
| 351
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    """Map an original fairseq weight name onto the transformers naming scheme."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename all keys, splitting fused qkv projections and pulling out the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """Build the decoder config for a given original checkpoint size."""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
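
# Example invocation (illustrative; the script filename is an assumption, the
# flags are the ones defined by the argparse block above):
#
#   python convert_musicgen_checkpoint.py \
#       --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small \
#       --device cpu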
| 624
| 0
|
'''simple docstring'''
def upper(word: str) -> str:
    """Convert every lowercase ASCII letter in `word` to its uppercase counterpart."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
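
# Quick sanity sketch: lowercase ASCII letters sit exactly 32 code points above
# their uppercase counterparts, so subtracting 32 from ord() uppercases them.
assert ord("a") - ord("A") == 32
assert upper("hello, World!") == "HELLO, WORLD!"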
| 721
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
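
# Worked example (hand-computed; assumes get_image_size treats a (H, W, 3)
# array as channels-last): a 480x640 image targeted at 384x384 with
# keep_aspect_ratio=True and multiple=32 has scale_height = 384/480 = 0.8 and
# scale_width = 384/640 = 0.6; fitting the height changes the image least, so
# both sides scale by 0.8 and snap to multiples of 32:
#
#   get_resize_output_image_size(np.zeros((480, 640, 3)), (384, 384), True, 32)
#   # -> (384, 512)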
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
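
# Minimal usage sketch (illustrative values; assumes a PIL image or numpy
# array as input — BaseImageProcessor.__call__ routes to preprocess above):
#
#   image_processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   inputs = image_processor(images=image, return_tensors="pt")
#   list(inputs["pixel_values"].shape)  # [1, 3, 384, 384]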
| 223
| 0
|
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1-indexed) via the product formula."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
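
# Quick check of the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1): the first
# five Catalan numbers (1-indexed here) are 1, 1, 2, 5, 14.
assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]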
| 385
|
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row, filling each row from the one above."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle exploiting the left-right symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
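
# Sanity sketch: the two generators agree; e.g. five rows of Pascal's triangle:
#
#   generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5)
#   == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]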
| 385
| 1
|
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    """Split `word` into lines of exactly `max_width` characters, fully justified with spaces."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
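
# Worked example (hand-checked): each returned line is exactly max_width wide;
# "This is an" gets 8 extra spaces split evenly, "example of text" gets 3
# split round-robin, and the last line is left-aligned and padded.
assert text_justification("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]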
| 704
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"

_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"

_KWARGS_DESCRIPTION = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTeX.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTeX.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset, with LaTeX canonicalization of inputs."""

    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, references, predictions):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
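
# Usage sketch (mirrors the doctest in _KWARGS_DESCRIPTION above; requires the
# hendrycks/math package that provides math_equivalence):
#
#   metric = datasets.load_metric("competition_math")
#   metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#   # {'accuracy': 1.0}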
| 95
| 0
|