| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82 to 53.2k | int64 0 to 721 | stringlengths 91 to 41.9k | int64 0 to 699 | int64 0 to 1 |
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    # Schur complement of the block matrix [[A, B], [B.T, C]] with respect to A.
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        # det([[A, B], [B.T, C]]) == det(A) * det(S) for the Schur complement S
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
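As a quick sanity check of `schur_complement` above, here is a minimal sketch (the toy matrices are chosen for this example, not taken from the tests) verifying the determinant identity det(X) = det(A) * det(S) for X = [[A, B], [B.T, C]]:

import numpy as np

# Toy matrices for this sketch; any invertible A with matching shapes works.
a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[5.0]])

s = schur_complement(a, b, c)  # S = C - B.T @ A^-1 @ B
x = np.block([[a, b], [b.T, c]])

# det(X) = det(A) * det(S) holds up to floating-point rounding.
assert np.isclose(np.linalg.det(x), np.linalg.det(a) * np.linalg.det(s))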
| code_codestyle: 57 |
"""simple docstring"""
def _snake_case ( UpperCamelCase : str , UpperCamelCase : int ):
UpperCAmelCase : List[Any] = word.split()
def justify(UpperCamelCase : list , UpperCamelCase : int , UpperCamelCase : int ) -> str:
UpperCAmelCase : List[Any] = max_width - width
UpperCAmelCase : Any = len(UpperCamelCase )
if len(UpperCamelCase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
UpperCAmelCase : Optional[Any] = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
UpperCAmelCase : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
UpperCAmelCase : List[Any] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase ):
num_spaces_between_words_list[i] += 1
UpperCAmelCase : Tuple = []
for i in range(UpperCamelCase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase )
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : list[str] = []
UpperCAmelCase : Union[str, Any] = 0
for word in words:
if width + len(UpperCamelCase ) + len(UpperCamelCase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase )
width += len(UpperCamelCase )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) )
# reset new line and new width
UpperCAmelCase , UpperCAmelCase : Tuple = [word], len(UpperCamelCase )
UpperCAmelCase : Optional[Any] = max_width - width - len(UpperCamelCase )
answer.append(""" """.join(UpperCamelCase ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
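A short usage sketch for `text_justification` above; the sample sentence and width are illustrative:

for line in text_justification("This is an example of text justification.", 16):
    print(repr(line))  # each printed line is exactly 16 characters wide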
| style_context_codestyle: 160 | label: 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' )
lowerCamelCase__ : Any =json.loads(open(__lowerCamelCase ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
lowerCamelCase__ : List[str] =args.output + '''.pt'''
lowerCamelCase__ : Tuple =OrderedDict()
with tf.device('''/CPU:0''' ):
lowerCamelCase__ : Union[str, Any] =tf.train.load_checkpoint(args.tf_model_dir )
lowerCamelCase__ : List[str] =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCamelCase__ : int =reader.get_tensor(__lowerCamelCase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowerCamelCase__ : Dict =int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowerCamelCase__ : int =8
lowerCamelCase__ : Any ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowerCamelCase__ : Optional[Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Tuple =torch.tensor(__lowerCamelCase )
elif key_name.startswith('''model/moe''' ):
lowerCamelCase__ : List[Any] =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowerCamelCase__ : Any ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowerCamelCase__ : List[str] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase )
elif key_name.endswith('''/softmlp/kernel''' ):
lowerCamelCase__ : Optional[Any] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowerCamelCase__ : Optional[int] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : int =torch.tensor(__lowerCamelCase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowerCamelCase__ : List[Any] =key_name[-9:-7]
for i in range(16 ):
lowerCamelCase__ : Optional[int] ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowerCamelCase__ : List[str] =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCamelCase__ : str =torch.tensor(__lowerCamelCase )
elif key_name.startswith('''model/mlp''' ):
lowerCamelCase__ : Union[str, Any] =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowerCamelCase__ : List[str] ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowerCamelCase__ : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase )
elif key_name.endswith('''/p1/bias''' ):
lowerCamelCase__ : int ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowerCamelCase__ : Optional[int] =vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Optional[int] =torch.tensor(__lowerCamelCase )
elif key_name.endswith('''/p2/kernel''' ):
lowerCamelCase__ : str ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowerCamelCase__ : Optional[int] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Dict =torch.tensor(__lowerCamelCase )
elif key_name.endswith('''/p2/bias''' ):
lowerCamelCase__ : List[str] ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowerCamelCase__ : Tuple =vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase )
elif key_name.startswith('''model/ln''' ):
lowerCamelCase__ : Dict =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase__ : Tuple ='''model.blocks.%d.feed_forward.norm.bias''' % player
lowerCamelCase__ : Dict =vnp.copy() # same because it is one dimensional
lowerCamelCase__ : str =torch.tensor(__lowerCamelCase )
elif key_name.endswith('''/g''' ):
lowerCamelCase__ : Any ='''model.blocks.%d.feed_forward.norm.weight''' % player
lowerCamelCase__ : List[Any] =vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase )
elif key_name.startswith('''model/att''' ):
lowerCamelCase__ : Any =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowerCamelCase__ : List[Any] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCamelCase__ : Optional[Any] =state[:, 0, :, :]
lowerCamelCase__ : Dict =state[:, 1, :, :]
lowerCamelCase__ : Union[str, Any] =state[:, 2, :, :]
lowerCamelCase__ : int =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : List[str] =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Union[str, Any] =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowerCamelCase__ : int =torch.tensor(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowerCamelCase__ : List[str] =torch.tensor(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowerCamelCase__ : Dict =torch.tensor(__lowerCamelCase )
elif key_name.endswith('''/o/kernel''' ):
lowerCamelCase__ : List[str] ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowerCamelCase__ : Any =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : List[str] =torch.tensor(__lowerCamelCase )
elif key_name.startswith('''model/an''' ):
lowerCamelCase__ : Optional[Any] =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase__ : Dict ='''model.blocks.%d.self_attn.norm.bias''' % player
lowerCamelCase__ : str =vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Union[str, Any] =torch.tensor(__lowerCamelCase )
elif key_name.endswith('''/g''' ):
lowerCamelCase__ : Union[str, Any] ='''model.blocks.%d.self_attn.norm.weight''' % player
lowerCamelCase__ : List[Any] =vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowerCamelCase__ : Union[str, Any] ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowerCamelCase__ : int ='''model.%s.weight''' % nlayer
lowerCamelCase__ : List[Any] =vnp.copy() # same in embedded
lowerCamelCase__ : Dict =torch.tensor(__lowerCamelCase )
if key_name.startswith('''model/wte''' ):
lowerCamelCase__ : Union[str, Any] ='''lm_head.weight'''
lowerCamelCase__ : int =vnp.copy() # same in embedded
lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase )
elif key_name.startswith('''model/wob''' ):
lowerCamelCase__ : List[Any] ='''final_logits_bias'''
lowerCamelCase__ : List[Any] =vnp.copy() # same in embedded
lowerCamelCase__ : List[str] =state.reshape((1, -1) )
lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase )
elif key_name == "model/dense/kernel":
lowerCamelCase__ : str ='''model.last_project.weight'''
lowerCamelCase__ : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase )
elif key_name == "model/dense_1/bias":
lowerCamelCase__ : str ='''model.last_project.bias'''
lowerCamelCase__ : Tuple =vnp.copy() # same because it is one dimensional
lowerCamelCase__ : str =torch.tensor(__lowerCamelCase )
torch.save(__lowerCamelCase , args.output )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
_lowercase : List[Any] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
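The index parsing in the converter above is easy to misread, so here is a small sketch of what those string slices extract; the key names below are illustrative, not taken from a real checkpoint:

# "model/moe" is 9 characters, so key[9:] starts at the block index.
key = "model/moe10/switch_gating/kernel"
assert int(key[9:].split("/")[0]) == 10

# For expert kernels, the sub-layer name ("wi" or "wo") sits at key[-9:-7].
key = "model/moe3/wi/kernel"
assert key[-9:-7] == "wi"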
| code_codestyle: 625 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 42
_a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 42
_a = (1_6, 3_2, 9_6, 2_5_6)
_a = jnp.floataa
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : Tuple =nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
lowerCamelCase__ : Dict =[]
for i in range(len(self.block_out_channels ) - 1 ):
lowerCamelCase__ : Dict =self.block_out_channels[i]
lowerCamelCase__ : Dict =self.block_out_channels[i + 1]
lowerCamelCase__ : List[str] =nn.Conv(
lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(lowerCamelCase )
lowerCamelCase__ : Optional[int] =nn.Conv(
lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(lowerCamelCase )
lowerCamelCase__ : Any =blocks
lowerCamelCase__ : Optional[int] =nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self : Any, lowerCamelCase : int )-> List[str]:
lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase )
lowerCamelCase__ : Dict =nn.silu(lowerCamelCase )
for block in self.blocks:
lowerCamelCase__ : str =block(lowerCamelCase )
lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
lowerCamelCase__ : Any =self.conv_out(lowerCamelCase )
return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_a = 3_2
_a = 4
_a = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_a = False
_a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_a = 2
_a = 8
_a = None
_a = 1_2_8_0
_a = 0.0
_a = False
_a = jnp.floataa
_a = True
_a = 0
_a = "rgb"
_a = (1_6, 3_2, 9_6, 2_5_6)
def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
# init input tensors
lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]
def snake_case ( self : Any )-> Tuple:
lowerCamelCase__ : Optional[int] =self.block_out_channels
lowerCamelCase__ : Tuple =block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase__ : int =nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
lowerCamelCase__ : str =FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )
lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
lowerCamelCase__ : Dict =self.only_cross_attention
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase__ : Union[str, Any] =[]
lowerCamelCase__ : Dict =[]
lowerCamelCase__ : List[Any] =block_out_channels[0]
lowerCamelCase__ : List[Any] =nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase__ : List[Any] =output_channel
lowerCamelCase__ : str =block_out_channels[i]
lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(lowerCamelCase )
for _ in range(self.layers_per_block ):
lowerCamelCase__ : Any =nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase )
if not is_final_block:
lowerCamelCase__ : Any =nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase )
lowerCamelCase__ : int =down_blocks
lowerCamelCase__ : List[str] =controlnet_down_blocks
# mid
lowerCamelCase__ : Tuple =block_out_channels[-1]
lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
lowerCamelCase__ : List[str] =nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )
# 1. time
if not isinstance(lowerCamelCase, jnp.ndarray ):
lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )
# 2. pre-process
lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
sample += controlnet_cond
# 3. down
lowerCamelCase__ : Union[str, Any] =(sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
else:
lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
# 5. contronet blocks
lowerCamelCase__ : Optional[Any] =()
for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )
# 6. scaling
lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
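A minimal initialization sketch for `FlaxControlNetModel` above, assuming the surrounding `diffusers` modules are importable; it only shows how `init_weights` is driven:

import jax

# Sketch only: instantiate with defaults and build parameters from a PRNG key.
controlnet = FlaxControlNetModel(sample_size=32)
params = controlnet.init_weights(rng=jax.random.PRNGKey(0))
# The zero-initialized 1x1 convs mean the ControlNet residuals start at zero.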
| style_context_codestyle: 625 | label: 1 |
from typing import Dict, Optional

import numpy as np

import datasets


_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
a = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
a = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union between a prediction and its ground truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate intersections and unions over all (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy, and overall accuracy over a set of images."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
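A small sanity-check sketch for `intersect_and_union` above, using toy two-class arrays (values chosen for this example):

import numpy as np

# Two classes, one mismatched pixel at position (1, 0).
pred = np.array([[0, 1], [1, 1]])
gt = np.array([[0, 1], [0, 1]])
inter, union, pred_area, gt_area = intersect_and_union(pred, gt, num_labels=2, ignore_index=255)
# class 0: intersection 1, union 2; class 1: intersection 2, union 3
assert inter.tolist() == [1, 2] and union.tolist() == [2, 3]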
| code_codestyle: 109 |
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """Wrapper that runs a RagRetriever inside a Ray actor."""

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """RagRetriever that distributes retrieval over a pool of Ray actor workers."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
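A hedged setup sketch for `RagRayDistributedRetriever` above; the worker count and model name are illustrative values, not requirements:

import ray

# Sketch only: spin up two retrieval actors and build the retriever around them.
ray.init()
workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
retriever = RagRayDistributedRetriever.from_pretrained(
    "facebook/rag-token-nq", actor_handles=workers
)
retriever.init_retrieval()  # initializes the index on every worker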
| style_context_codestyle: 688 | label: 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowercase__ : Tuple = random.Random()
def UpperCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
if rng is None:
lowerCAmelCase_ : Optional[int] = global_rng
lowerCAmelCase_ : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=7 , SCREAMING_SNAKE_CASE_ : List[str]=4_0_0 , SCREAMING_SNAKE_CASE_ : int=2_0_0_0 , SCREAMING_SNAKE_CASE_ : str=1_0 , SCREAMING_SNAKE_CASE_ : List[str]=1_6_0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : List[Any]=4_0_0_0 , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : Tuple=True , ):
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : List[Any] = batch_size
lowerCAmelCase_ : str = min_seq_length
lowerCAmelCase_ : str = max_seq_length
lowerCAmelCase_ : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase_ : int = padding_value
lowerCAmelCase_ : Optional[Any] = sampling_rate
lowerCAmelCase_ : Optional[Any] = return_attention_mask
lowerCAmelCase_ : Optional[int] = do_normalize
lowerCAmelCase_ : Optional[Any] = feature_size
lowerCAmelCase_ : Tuple = chunk_length
lowerCAmelCase_ : Tuple = hop_length
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : List[Any]=False ):
def _flatten(SCREAMING_SNAKE_CASE_ : List[str] ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
lowerCAmelCase_ : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase_ : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase_ : Any = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase__ ( lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = WhisperFeatureExtractor if is_speech_available() else None
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[str] = WhisperFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ : List[str] = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE_ )[0]
check_json_file_has_correct_format(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = feat_extract_first.to_dict()
lowerCAmelCase_ : int = feat_extract_second.to_dict()
lowerCAmelCase_ : int = feat_extract_first.mel_filters
lowerCAmelCase_ : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , 'feat_extract.json' )
feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = feat_extract_first.to_dict()
lowerCAmelCase_ : Dict = feat_extract_second.to_dict()
lowerCAmelCase_ : int = feat_extract_first.mel_filters
lowerCAmelCase_ : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase_ : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase_ : Dict = feature_extractor(SCREAMING_SNAKE_CASE_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCAmelCase_ : Optional[int] = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
lowerCAmelCase_ : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test batched
lowerCAmelCase_ : List[str] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
lowerCAmelCase_ : List[Any] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase_ : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase_ : Dict = np.asarray(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
lowerCAmelCase_ : Any = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test truncation required
lowerCAmelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
lowerCAmelCase_ : Optional[int] = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
lowerCAmelCase_ : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCAmelCase_ : List[Any] = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs_truncated]
lowerCAmelCase_ : List[Any] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
lowerCAmelCase_ : Union[str, Any] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self : int ):
import torch
lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Dict = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
lowerCAmelCase_ : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase_ : Any = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCAmelCase_ : List[str] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
lowerCAmelCase_ : str = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase_ : Any = ds.sort('id' ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# fmt: off
lowerCAmelCase_ : List[Any] = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
lowerCAmelCase_ : Optional[int] = self._load_datasamples(1 )
lowerCAmelCase_ : List[Any] = WhisperFeatureExtractor()
lowerCAmelCase_ : List[Any] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Optional[Any] = self._load_datasamples(1 )[0]
lowerCAmelCase_ : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
lowerCAmelCase_ : Dict = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_ ) - 1 ) < 1E-3 ) )
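For readers who want the API outside the test harness, a minimal sketch of the extractor exercised above; the one-second random waveform is illustrative, not a real speech sample:

import numpy as np
from transformers import WhisperFeatureExtractor

# Default Whisper settings: 80 mel bins, 30 s chunks at 16 kHz.
feature_extractor = WhisperFeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)  # 1 s of synthetic audio
features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000), matching the integration test above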
| code_codestyle: 709 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class UpperCamelCase__ ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return datasets.DatasetInfo(
features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=SCREAMING_SNAKE_CASE_ , )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(SCREAMING_SNAKE_CASE_ )
class UpperCamelCase__ ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : int ):
return datasets.DatasetInfo(
features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=SCREAMING_SNAKE_CASE_ , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase_ ( ) -> Tuple:
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
def UpperCamelCase_ ( ) -> int:
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
@require_beam
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : Optional[int] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCAmelCase_ : Union[str, Any] = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE_ , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(SCREAMING_SNAKE_CASE_ , builder.name , 'default' , '0.0.0' , F"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
lowerCAmelCase_ : int = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , SCREAMING_SNAKE_CASE_ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def SCREAMING_SNAKE_CASE__ ( self : str ):
import apache_beam as beam
lowerCAmelCase_ : int = beam.io.parquetio.WriteToParquet
lowerCAmelCase_ : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCAmelCase_ : Dict = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE_ , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
lowerCAmelCase_ : str = partial(SCREAMING_SNAKE_CASE_ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
SCREAMING_SNAKE_CASE_ , builder.name , 'default' , '0.0.0' , F"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
SCREAMING_SNAKE_CASE_ , builder.name , 'default' , '0.0.0' , F"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
lowerCAmelCase_ : List[Any] = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , SCREAMING_SNAKE_CASE_ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , SCREAMING_SNAKE_CASE_ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCAmelCase_ : Dict = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE_ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : Tuple = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCAmelCase_ : Union[str, Any] = NestedBeamDataset(cache_dir=SCREAMING_SNAKE_CASE_ , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(SCREAMING_SNAKE_CASE_ , builder.name , 'default' , '0.0.0' , F"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
lowerCAmelCase_ : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , SCREAMING_SNAKE_CASE_ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
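For reference, the dummy example generators above expand to these rows (derived directly from their definitions):

# Each example is an (id, row) pair consumed by beam.Create in the builders.
assert get_test_dummy_examples() == [
    (0, {"content": "foo"}),
    (1, {"content": "bar"}),
    (2, {"content": "foobar"}),
]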
| style_context_codestyle: 317 | label: 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A_ = logging.get_logger(__name__)
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = ['input_features', 'is_longer']
def __init__( self , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=48000 , SCREAMING_SNAKE_CASE_=480 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 14000 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "fusion" , SCREAMING_SNAKE_CASE_ = "repeatpad" , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = top_db
lowerCamelCase_ = truncation
lowerCamelCase_ = padding
lowerCamelCase_ = fft_window_size
lowerCamelCase_ = (fft_window_size >> 1) + 1
lowerCamelCase_ = hop_length
lowerCamelCase_ = max_length_s
lowerCamelCase_ = max_length_s * sampling_rate
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = frequency_min
lowerCamelCase_ = frequency_max
lowerCamelCase_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=SCREAMING_SNAKE_CASE_ , min_frequency=SCREAMING_SNAKE_CASE_ , max_frequency=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , norm=SCREAMING_SNAKE_CASE_ , mel_scale='htk' , )
lowerCamelCase_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=SCREAMING_SNAKE_CASE_ , min_frequency=SCREAMING_SNAKE_CASE_ , max_frequency=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , norm='slaney' , mel_scale='slaney' , )
def UpperCamelCase( self ) -> Dict[str, Any]:
'''simple docstring'''
lowerCamelCase_ = copy.deepcopy(self.__dict__ )
lowerCamelCase_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> np.ndarray:
'''simple docstring'''
lowerCamelCase_ = spectrogram(
SCREAMING_SNAKE_CASE_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=SCREAMING_SNAKE_CASE_ , log_mel='dB' , )
return log_mel_spectrogram.T
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCamelCase_ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCamelCase_ = [0]
# randomly choose index for each part
lowerCamelCase_ = np.random.choice(ranges[0] )
lowerCamelCase_ = np.random.choice(ranges[1] )
lowerCamelCase_ = np.random.choice(ranges[2] )
lowerCamelCase_ = mel[idx_front : idx_front + chunk_frames, :]
lowerCamelCase_ = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCamelCase_ = mel[idx_back : idx_back + chunk_frames, :]
lowerCamelCase_ = torch.tensor(mel[None, None, :] )
lowerCamelCase_ = torch.nn.functional.interpolate(
SCREAMING_SNAKE_CASE_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = mel_shrink[0][0].numpy()
lowerCamelCase_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
    def _get_input_mel( self , waveform , max_length , truncation , padding ) -> np.array:
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 accounts for how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
        else:
            longer = False
            # only use repeat as a new possible value for padding; the audio is repeated before the usual max_length padding is applied
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech , truncation = None , padding = None , max_length = None , sampling_rate = None , return_tensors = None , **kwargs ) -> BatchFeature:
        '''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float32 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
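# A minimal, self-contained sketch of the "repeat" vs "repeatpad" padding logic
# used in _get_input_mel above, written with plain NumPy so it can run on its
# own. The function and variable names here are illustrative, not extractor API.
import numpy as np

def demo_repeat_pad(waveform , max_length , mode='repeatpad'):
    """Tile a short waveform up to max_length, zero-padding any remainder."""
    n_repeat = int(max_length / len(waveform ) )
    if mode == 'repeat':
        return np.tile(waveform , n_repeat + 1 )[:max_length]
    tiled = np.tile(waveform , n_repeat )  # 'repeatpad': tile, then pad the tail
    return np.pad(tiled , (0, max_length - tiled.shape[0]) , mode='constant' , constant_values=0 )

short = np.arange(4 , dtype=np.float32 )  # a 4-sample "waveform"
assert demo_repeat_pad(short , 10 , 'repeat' ).shape == (10,)
assert demo_repeat_pad(short , 10 , 'repeatpad' ).shape == (10,)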
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
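    # Usage sketch: the IDs below are synthetic examples; the check letter is
    # derived from the (number % 23) rule used in the function itself.
    assert is_spain_national_id('12345678Z' )  # 12345678 % 23 == 14 -> LOOKUP_LETTERS[14] == 'Z'
    assert not is_spain_national_id('12345678A' )  # wrong check letter
    assert is_spain_national_id('12345678-z' )  # dashes and lowercase are normalized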
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Union[str, Any] = XCLIPTextConfig()
# derive patch size from model name
lowercase__ : Dict = model_name.find('''patch''' )
lowercase__ : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
lowercase__ : str = XCLIPVisionConfig(patch_size=UpperCAmelCase , num_frames=UpperCAmelCase )
if "large" in model_name:
lowercase__ : Union[str, Any] = 768
lowercase__ : List[str] = 3072
lowercase__ : Dict = 12
lowercase__ : List[str] = 1024
lowercase__ : List[Any] = 4096
lowercase__ : Dict = 16
lowercase__ : str = 24
lowercase__ : Any = 768
lowercase__ : List[str] = 3072
if model_name == "xclip-large-patch14-16-frames":
lowercase__ : List[Any] = 336
lowercase__ : int = XCLIPConfig.from_text_vision_configs(UpperCAmelCase , UpperCAmelCase )
if "large" in model_name:
lowercase__ : Any = 768
return config
def __UpperCamelCase ( UpperCAmelCase ):
# text encoder
if name == "token_embedding.weight":
lowercase__ : Dict = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
lowercase__ : int = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
lowercase__ : Tuple = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
lowercase__ : Union[str, Any] = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
lowercase__ : Optional[int] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
lowercase__ : Optional[int] = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
lowercase__ : str = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
lowercase__ : str = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
lowercase__ : Dict = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
lowercase__ : Tuple = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
lowercase__ : List[Any] = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
lowercase__ : int = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
lowercase__ : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
lowercase__ : Any = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
lowercase__ : Tuple = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
lowercase__ : List[Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
lowercase__ : Tuple = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
lowercase__ : Dict = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
lowercase__ : Tuple = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
lowercase__ : Union[str, Any] = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
lowercase__ : Any = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
lowercase__ : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
for key in orig_state_dict.copy().keys():
lowercase__ : str = orig_state_dict.pop(UpperCAmelCase )
if "attn.in_proj" in key:
lowercase__ : str = key.split('''.''' )
if key.startswith('''visual''' ):
lowercase__ : Any = key_split[3]
lowercase__ : List[str] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowercase__ : Dict = val[
:dim, :
]
lowercase__ : Dict = val[
dim : dim * 2, :
]
lowercase__ : Dict = val[
-dim:, :
]
else:
lowercase__ : Optional[int] = val[
:dim
]
lowercase__ : Tuple = val[
dim : dim * 2
]
lowercase__ : Dict = val[
-dim:
]
else:
if "weight" in key:
lowercase__ : Tuple = val[
:dim, :
]
lowercase__ : Tuple = val[
dim : dim * 2, :
]
lowercase__ : str = val[
-dim:, :
]
else:
lowercase__ : List[Any] = val[:dim]
lowercase__ : Optional[Any] = val[
dim : dim * 2
]
lowercase__ : Tuple = val[-dim:]
elif key.startswith('''mit''' ):
lowercase__ : Dict = key_split[2]
lowercase__ : List[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
lowercase__ : str = val[:dim, :]
lowercase__ : Any = val[dim : dim * 2, :]
lowercase__ : Optional[Any] = val[-dim:, :]
else:
lowercase__ : List[Any] = val[:dim]
lowercase__ : Dict = val[dim : dim * 2]
lowercase__ : Optional[int] = val[-dim:]
else:
lowercase__ : Tuple = key_split[2]
lowercase__ : Optional[int] = config.text_config.hidden_size
if "weight" in key:
lowercase__ : Union[str, Any] = val[:dim, :]
lowercase__ : Any = val[
dim : dim * 2, :
]
lowercase__ : Tuple = val[-dim:, :]
else:
lowercase__ : List[Any] = val[:dim]
lowercase__ : Optional[int] = val[
dim : dim * 2
]
lowercase__ : Dict = val[-dim:]
else:
lowercase__ : Any = rename_key(UpperCAmelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowercase__ : List[str] = val.T
lowercase__ : Optional[Any] = val
return orig_state_dict
def prepare_video( num_frames ):
    if num_frames == 8:
        filename = '''eating_spaghetti_8_frames.npy'''
    elif num_frames == 16:
        filename = '''eating_spaghetti.npy'''
    elif num_frames == 32:
        filename = '''eating_spaghetti_32_frames.npy'''
    else:
        raise ValueError(f"""Unsupported number of frames: {num_frames}""" )
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename=filename , repo_type='''dataset''' , )
    video = np.load(file )
    return list(video )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ):
lowercase__ : str = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
lowercase__ : List[Any] = model_to_url[model_name]
lowercase__ : int = 8
if "16-frames" in model_name:
lowercase__ : int = 16
elif "shot" in model_name:
lowercase__ : Optional[int] = 32
lowercase__ : Any = get_xclip_config(UpperCAmelCase , UpperCAmelCase )
lowercase__ : Optional[Any] = XCLIPModel(UpperCAmelCase )
model.eval()
if "drive" in checkpoint_url:
lowercase__ : Optional[Any] = '''pytorch_model.bin'''
gdown.cached_download(UpperCAmelCase , UpperCAmelCase , quiet=UpperCAmelCase )
lowercase__ : List[Any] = torch.load(UpperCAmelCase , map_location='''cpu''' )['''model''']
else:
lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(UpperCAmelCase )['''model''']
lowercase__ : List[str] = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
lowercase__ : int = XCLIPModel(UpperCAmelCase )
lowercase__ , lowercase__ : Any = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowercase__ : Tuple = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
lowercase__ : Optional[Any] = VideoMAEImageProcessor(size=UpperCAmelCase )
lowercase__ : List[Any] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
lowercase__ : Optional[Any] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
lowercase__ : List[Any] = XCLIPProcessor(image_processor=UpperCAmelCase , tokenizer=UpperCAmelCase )
lowercase__ : int = prepare_video(UpperCAmelCase )
lowercase__ : Optional[Any] = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=UpperCAmelCase , return_tensors='''pt''' , padding=UpperCAmelCase )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
lowercase__ : Dict = model(**UpperCAmelCase )
# Verify outputs
lowercase__ : str = outputs.logits_per_video
lowercase__ : Union[str, Any] = logits_per_video.softmax(dim=1 )
print('''Probs:''' , UpperCAmelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
lowercase__ : Optional[int] = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
lowercase__ : int = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
lowercase__ : Dict = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
lowercase__ : Optional[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
lowercase__ : Tuple = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
lowercase__ : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowercase__ : Any = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowercase__ : str = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowercase__ : Union[str, Any] = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowercase__ : List[str] = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowercase__ : Any = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowercase__ : Union[str, Any] = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowercase__ : List[str] = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowercase__ : str = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowercase__ : str = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowercase__ : Tuple = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowercase__ : List[str] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowercase__ : List[str] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(UpperCAmelCase , organization='''nielsr''' )
processor.push_to_hub(UpperCAmelCase , organization='''nielsr''' )
slow_tokenizer.push_to_hub(UpperCAmelCase , organization='''nielsr''' )
if __name__ == "__main__":
__a: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__a: Optional[int] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
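# Illustrative invocation (the script filename and output path are placeholders,
# not taken from the repository):
#   python convert_xclip.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./converted/xclip-base-patch32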
'''simple docstring'''
def depth_first_search( grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
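    # Usage sketch: count simple paths from the top-left to the bottom-right
    # corner of a small grid, where 1 marks a blocked cell (grid is illustrative).
    maze = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    assert depth_first_search(maze , 0 , 0 , set() ) == 2  # clockwise and counter-clockwise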
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin ):
    feature_extractor_class = """WhisperFeatureExtractor"""
    tokenizer_class = """WhisperTokenizer"""
    def __init__( self , feature_extractor , tokenizer):
        super().__init__(feature_extractor , tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps)
    def __call__( self , *args , **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs)
        audio = kwargs.pop('audio' , None)
        sampling_rate = kwargs.pop('sampling_rate' , None)
        text = kwargs.pop('text' , None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        if text is not None:
            encodings = self.tokenizer(text , **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)
    def get_prompt_ids( self , text , return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors)
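# Hedged usage sketch for the processor above, assuming the public checkpoint
# name "openai/whisper-tiny" and a 16 kHz mono waveform (both are assumptions
# for illustration, not requirements of the class):
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   batch = processor(audio=waveform, sampling_rate=16000, text="hello", return_tensors="pt")
#   print(batch.input_features.shape, batch.labels.shape)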
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__( self , initial_capacity: int = 6):
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)
    def create_linked_list( self , initial_capacity: int):
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self):
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self):
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data):
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self):
        if self.is_empty():
            raise Exception('Empty Queue')
    def check_is_full( self):
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')
class Node:
    def __init__( self):
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
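    # Usage sketch for the fixed-capacity circular queue above (capacity
    # defaults to 6); values and capacity here are illustrative.
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue('a')
    queue.enqueue('b')
    assert queue.first() == 'a'
    assert queue.dequeue() == 'a'
    assert queue.dequeue() == 'b'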
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCamelCase : List[Any] = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    '''simple docstring'''
    def __init__( self , group: int = 14 ) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self ) -> str:
        return hex(self.__private_key )[2:]
    def generate_public_key( self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self , key: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self , other_key_str: str ) -> str:
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key_str: int , prime: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str: str , remote_public_key_str: str , group: int = 14 ) -> str:
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
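    # Usage sketch: two parties derive the same shared secret using only the
    # methods defined above.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key() )
    bob_shared = bob.generate_shared_key(alice.generate_public_key() )
    assert alice_shared == bob_shared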
import sys
import turtle
def get_mid( pa: tuple[float, float] ,pb: tuple[float, float] ) -> tuple[float, float]:
    '''simple docstring'''
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle( vertexa: tuple[float, float] ,vertexb: tuple[float, float] ,vertexc: tuple[float, float] ,depth: int ,) -> None:
    '''simple docstring'''
    my_pen.up()
    my_pen.goto(vertexa[0] ,vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] ,vertexb[1] )
    my_pen.goto(vertexc[0] ,vertexc[1] )
    my_pen.goto(vertexa[0] ,vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa ,get_mid(vertexa ,vertexb ) ,get_mid(vertexa ,vertexc ) ,depth - 1 )
    triangle(vertexb ,get_mid(vertexa ,vertexb ) ,get_mid(vertexb ,vertexc ) ,depth - 1 )
    triangle(vertexc ,get_mid(vertexc ,vertexb ) ,get_mid(vertexa ,vertexc ) ,depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
__lowerCamelCase : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
__lowerCamelCase : Optional[Any] = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
from __future__ import annotations
def kmp( pattern: str , text: str ) -> bool:
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern: str ) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
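    # Worked example: the failure array of 'ABABX' is [0, 0, 1, 2, 0]; e.g. at
    # index 3 the longest proper prefix of 'ABAB' that is also a suffix is 'AB',
    # hence failure[3] == 2.
    assert get_failure_array('ABABX') == [0, 0, 1, 2, 0]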
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def lowerCamelCase__ ( snake_case_ : Any ) -> Tuple:
# initialize config
if "resnet-50" in model_name:
__snake_case = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
__snake_case = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
raise ValueError('''Model name should include either resnet50 or resnet101''' )
__snake_case = DetrConfig(use_timm_backbone=snake_case_ , backbone_config=snake_case_ )
# set label attributes
__snake_case = '''panoptic''' in model_name
if is_panoptic:
__snake_case = 250
else:
__snake_case = 91
__snake_case = '''huggingface/label-files'''
__snake_case = '''coco-detection-id2label.json'''
__snake_case = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
__snake_case = {int(snake_case_ ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def lowerCamelCase__ ( snake_case_ : Dict ) -> Union[str, Any]:
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def lowerCamelCase__ ( snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : int ) -> str:
__snake_case = state_dict.pop(snake_case_ )
__snake_case = val
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : List[Any]=False ) -> List[str]:
__snake_case = ''''''
if is_panoptic:
__snake_case = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__snake_case = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
__snake_case = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__snake_case = in_proj_weight[:256, :]
__snake_case = in_proj_bias[:256]
__snake_case = in_proj_weight[256:512, :]
__snake_case = in_proj_bias[256:512]
__snake_case = in_proj_weight[-256:, :]
__snake_case = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__snake_case = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__snake_case = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__snake_case = in_proj_weight[:256, :]
__snake_case = in_proj_bias[:256]
__snake_case = in_proj_weight[256:512, :]
__snake_case = in_proj_bias[256:512]
__snake_case = in_proj_weight[-256:, :]
__snake_case = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__snake_case = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
__snake_case = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__snake_case = in_proj_weight_cross_attn[:256, :]
__snake_case = in_proj_bias_cross_attn[:256]
__snake_case = in_proj_weight_cross_attn[256:512, :]
__snake_case = in_proj_bias_cross_attn[256:512]
__snake_case = in_proj_weight_cross_attn[-256:, :]
__snake_case = in_proj_bias_cross_attn[-256:]
def lowerCamelCase__ ( ) -> int:
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : str=None , snake_case_ : Dict=False ) -> Dict:
__snake_case , __snake_case = get_detr_config(snake_case_ )
# load original model from torch hub
__snake_case = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f"""Converting model {model_name}...""" )
__snake_case = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=snake_case_ ).eval()
__snake_case = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(snake_case_ ):
if is_panoptic:
__snake_case = '''detr.''' + src
rename_key(snake_case_ , snake_case_ , snake_case_ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__snake_case = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__snake_case = state_dict.pop(snake_case_ )
__snake_case = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__snake_case = state_dict.pop(snake_case_ )
__snake_case = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__snake_case = state_dict.pop(snake_case_ )
__snake_case = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__snake_case = state_dict.pop(snake_case_ )
__snake_case = val
# finally, create HuggingFace model and load state dict
__snake_case = DetrForSegmentation(snake_case_ ) if is_panoptic else DetrForObjectDetection(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
# verify our conversion on an image
__snake_case = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__snake_case = DetrImageProcessor(format=snake_case_ )
__snake_case = processor(images=prepare_img() , return_tensors='''pt''' )
__snake_case = encoding['''pixel_values''']
__snake_case = detr(snake_case_ )
__snake_case = model(snake_case_ )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f"""nielsr/{model_name}""" )
processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
snake_case_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
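# Illustrative invocation (the script filename and output path are placeholders,
# not taken from the repository):
#   python convert_detr.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./converted/detr-resnet-50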
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( a):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = tempfile.mkdtemp()
_lowerCAmelCase : Dict = 5
# Realm tok
_lowerCAmelCase : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname, "realm_tokenizer")
os.makedirs(__a, exist_ok=__a)
_lowerCAmelCase : Dict = os.path.join(__a, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname, "realm_block_records")
os.makedirs(__a, exist_ok=__a)
def snake_case__ ( self):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = RealmConfig(num_block_records=self.num_block_records)
return config
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
})
return dataset
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
], dtype=__a, )
return block_records
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = RealmRetriever(
block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer(), )
return retriever
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_config()
_lowerCAmelCase : List[Any] = self.get_dummy_retriever()
_lowerCAmelCase : Union[str, Any] = retriever.tokenizer
_lowerCAmelCase : Tuple = np.array([0, 3], dtype="long")
_lowerCAmelCase : str = tokenizer(["Test question"]).input_ids
_lowerCAmelCase : Tuple = tokenizer(
["the fourth"], add_special_tokens=__a, return_token_type_ids=__a, return_attention_mask=__a, ).input_ids
_lowerCAmelCase : List[str] = config.reader_seq_len
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = retriever(
__a, __a, answer_ids=__a, max_length=__a, return_tensors="np")
self.assertEqual(len(__a), 2)
self.assertEqual(len(__a), 2)
self.assertEqual(len(__a), 2)
self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]), ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"], )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]), ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"], )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.get_config()
_lowerCAmelCase : Tuple = self.get_dummy_retriever()
_lowerCAmelCase : str = retriever.tokenizer
_lowerCAmelCase : List[Any] = np.array([0, 3, 5], dtype="long")
_lowerCAmelCase : Optional[Any] = tokenizer(["Test question"]).input_ids
_lowerCAmelCase : List[str] = tokenizer(
["the fourth", "longer longer"], add_special_tokens=__a, return_token_type_ids=__a, return_attention_mask=__a, ).input_ids
_lowerCAmelCase : int = config.reader_seq_len
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = retriever(
__a, __a, answer_ids=__a, max_length=__a, return_tensors="np")
self.assertEqual([False, True, True], __a)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], __a)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
# Test local path
_lowerCAmelCase : str = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
self.assertEqual(retriever.block_records[0], B"This is the first record")
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME)
_lowerCAmelCase : Any = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
self.assertEqual(retriever.block_records[0], B"This is the first record")
| 658
|
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
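# A minimal de-obfuscated sketch of what the class above computes, assuming
# its three identically named `snake_case__` methods are evaluate, derivative
# and integral. Coefficients are stored low-degree first (coefficients[i]
# multiplies x**i); evaluation here uses Horner's rule rather than the
# explicit power sum.
def _evaluate(coefficients: list[float], x: float) -> float:
    result = 0.0
    for c in reversed(coefficients):
        result = result * x + c
    return result

def _derivative(coefficients: list[float]) -> list[float]:
    # d/dx sum(c_i * x**i) = sum(i * c_i * x**(i - 1))
    return [coefficients[i] * i for i in range(1, len(coefficients))]

assert _evaluate([1, 2, 3], 2) == 17      # 1 + 2*2 + 3*2**2
assert _derivative([1, 2, 3]) == [2, 6]   # 2 + 6x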
| 658
| 1
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__A ='https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__A =BASE_URL + '/user'
# https://github.com/settings/tokens
__A =os.environ.get('USER_TOKEN', '')
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : List[str] = {
"""Authorization""": f'''token {auth_token}''',
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(UpperCamelCase_ , headers=UpperCamelCase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 407
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
a_ = {'''UserAgent''': UserAgent().random}
def _a ( UpperCamelCase_ : Any ) -> dict:
"""simple docstring"""
lowerCAmelCase__ = script.contents[0]
lowerCAmelCase__ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase__ :
def __init__( self , __UpperCAmelCase )-> Any:
'''simple docstring'''
lowerCAmelCase__ = F"https://www.instagram.com/{username}/"
lowerCAmelCase__ = self.get_json()
def UpperCAmelCase ( self )-> dict:
'''simple docstring'''
lowerCAmelCase__ = requests.get(self.url , headers=__UpperCAmelCase ).text
lowerCAmelCase__ = BeautifulSoup(__UpperCAmelCase , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self )-> str:
'''simple docstring'''
return F"{self.__class__.__name__}('{self.username}')"
def __str__( self )-> str:
'''simple docstring'''
return F"{self.fullname} ({self.username}) is {self.biography}"
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return self.user_data["username"]
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return self.user_data["full_name"]
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return self.user_data["biography"]
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return self.user_data["business_email"]
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return self.user_data["external_url"]
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def UpperCAmelCase ( self )-> bool:
'''simple docstring'''
return self.user_data["is_verified"]
@property
def UpperCAmelCase ( self )-> bool:
'''simple docstring'''
return self.user_data["is_private"]
def _a ( UpperCamelCase_ : str = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
lowerCAmelCase__ = InstagramUser(UpperCamelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCamelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = InstagramUser('''github''')
print(instagram_user)
print(F"{instagram_user.number_of_posts = }")
print(F"{instagram_user.number_of_followers = }")
print(F"{instagram_user.number_of_followings = }")
print(F"{instagram_user.email = }")
print(F"{instagram_user.website = }")
print(F"{instagram_user.profile_picture_url = }")
print(F"{instagram_user.is_verified = }")
print(F"{instagram_user.is_private = }")
| 339
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = "timesformer"
def __init__( self , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-6 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="divided_space_time" , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = image_size
snake_case: Tuple = patch_size
snake_case: int = num_channels
snake_case: int = num_frames
snake_case: Tuple = hidden_size
snake_case: Optional[int] = num_hidden_layers
snake_case: Any = num_attention_heads
snake_case: Tuple = intermediate_size
snake_case: List[str] = hidden_act
snake_case: int = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: List[str] = initializer_range
snake_case: int = layer_norm_eps
snake_case: str = qkv_bias
snake_case: Optional[int] = attention_type
snake_case: List[str] = drop_path_rate
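# Usage sketch, assuming the class above mirrors transformers.TimesformerConfig
# (the scrambled keyword names map onto the real arguments in the same order):
def _timesformer_config_sketch():
    from transformers import TimesformerConfig

    config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
    assert config.hidden_size == 768  # default from the signature above
    return config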
| 706
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
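# What the _LazyModule indirection above buys: importing the package stays
# cheap because each submodule is only imported on first attribute access.
# A minimal standalone sketch of the same pattern (not the transformers
# implementation itself):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, package, import_structure):
        super().__init__(name)
        self._package = package
        # map each exported symbol to the submodule that defines it
        self._where = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._where[attr], self._package)
        return getattr(submodule, attr)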
| 692
| 0
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> List[Any]:
lowerCAmelCase__ : str = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
lowerCAmelCase__ , lowerCAmelCase__ : str = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowerCAmelCase__ : List[Any] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase )
assert base_extractor.is_extractable(UpperCamelCase )
lowerCAmelCase__ : int = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(UpperCamelCase , UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowerCAmelCase__ : Optional[int] = file_path.read_text(encoding='''utf-8''' )
else:
lowerCAmelCase__ : int = output_path.read_text(encoding='''utf-8''' )
lowerCAmelCase__ : str = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
lowerCAmelCase__ : Optional[int] = input_paths[compression_format]
if input_path is None:
lowerCAmelCase__ : str = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = Extractor.infer_extractor_format(UpperCamelCase )
assert extractor_format is not None
lowerCAmelCase__ : Optional[int] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowerCAmelCase__ : Union[str, Any] = file_path.read_text(encoding='''utf-8''' )
else:
lowerCAmelCase__ : Optional[int] = output_path.read_text(encoding='''utf-8''' )
lowerCAmelCase__ : Optional[Any] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
import tarfile
lowerCAmelCase__ : List[str] = tmp_path / '''data_dot_dot'''
directory.mkdir()
lowerCAmelCase__ : Optional[Any] = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(UpperCamelCase , '''w''' ) as f:
f.add(UpperCamelCase , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
import tarfile
lowerCAmelCase__ : Optional[Any] = tmp_path / '''data_sym_link'''
directory.mkdir()
lowerCAmelCase__ : List[Any] = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=UpperCamelCase )
with tarfile.TarFile(UpperCamelCase , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
lowerCAmelCase__ : str = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
lowerCAmelCase__ : Any = insecure_tar_files[insecure_tar_file]
lowerCAmelCase__ : str = tmp_path / '''extracted'''
TarExtractor.extract(UpperCamelCase , UpperCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase ( UpperCamelCase ) -> Any:
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
lowerCAmelCase__ : str = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
lowerCAmelCase__ : Dict = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(UpperCamelCase )
assert zipfile.is_zipfile(str(UpperCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(UpperCamelCase ) # but we're right
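# Why the PNG above fools zipfile.is_zipfile: that helper only scans for an
# end-of-central-directory record (b"PK\x05\x06"), which the PNG payload
# happens to contain. Checking the leading magic number instead avoids the
# false positive; a sketch of that check:
def _looks_like_zip(path) -> bool:
    with open(path, "rb") as f:
        magic = f.read(4)
    # local-file-header of a non-empty archive, or the EOCD of an empty one
    return magic in (b"PK\x03\x04", b"PK\x05\x06")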
| 678
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]:
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : List[str] = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : str = features.copy() if features else default_expected_features
lowerCAmelCase__ : List[Any] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : str = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
if issubclass(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = parquet_path
elif issubclass(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = [parquet_path]
lowerCAmelCase__ : int = tmp_path / '''cache'''
lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_dataset(UpperCamelCase , UpperCamelCase )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str:
assert isinstance(UpperCamelCase , UpperCamelCase )
for split in splits:
lowerCAmelCase__ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
lowerCAmelCase__ : Any = tmp_path / '''cache'''
lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features
lowerCAmelCase__ : Optional[int] = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
if split:
lowerCAmelCase__ : Tuple = {split: parquet_path}
else:
lowerCAmelCase__ : int = '''train'''
lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path}
lowerCAmelCase__ : Optional[int] = tmp_path / '''cache'''
lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
lowerCAmelCase__ : int = pf.read()
assert dataset.data.table == output_table
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
lowerCAmelCase__ : Dict = {'''image''': [image_path]}
lowerCAmelCase__ : int = Features({'''image''': Image()} )
lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase )
lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any:
assert get_writer_batch_size(UpperCamelCase ) == expected
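# Minimal round-trip sketch of the reader/writer exercised above, reusing this
# file's imports and the fixtures' three-column layout (the /tmp path is
# illustrative):
def _parquet_round_trip_sketch() -> None:
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
    assert ParquetDatasetWriter(ds, "/tmp/foo.parquet").write() > 0
    reloaded = ParquetDatasetReader("/tmp/foo.parquet").read()
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]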
| 678
| 1
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __lowerCamelCase ( A__ : Any ) -> Tuple:
lowerCamelCase_ : str = args.pruning_method
lowerCamelCase_ : Any = args.threshold
lowerCamelCase_ : Union[str, Any] = args.model_name_or_path.rstrip("""/""" )
lowerCamelCase_ : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
lowerCamelCase_ : Dict = torch.load(os.path.join(A__ , """pytorch_model.bin""" ) )
lowerCamelCase_ : Union[str, Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCamelCase_ : str = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
lowerCamelCase_ : List[Any] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
lowerCamelCase_ : Tuple = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
lowerCamelCase_ : Any = MagnitudeBinarizer.apply(inputs=A__ , threshold=A__ )
lowerCamelCase_ : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCamelCase_ : List[Any] = name[:-6]
lowerCamelCase_ : Any = model[f'''{prefix_}mask_scores''']
lowerCamelCase_ : Tuple = TopKBinarizer.apply(A__ , A__ )
lowerCamelCase_ : List[str] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCamelCase_ : str = name[:-6]
lowerCamelCase_ : int = model[f'''{prefix_}mask_scores''']
lowerCamelCase_ : Optional[int] = ThresholdBinarizer.apply(A__ , A__ , A__ )
lowerCamelCase_ : List[str] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCamelCase_ : Optional[int] = name[:-6]
lowerCamelCase_ : str = model[f'''{prefix_}mask_scores''']
lowerCamelCase_ : Dict = -0.1, 1.1
lowerCamelCase_ : Union[str, Any] = torch.sigmoid(A__ )
lowerCamelCase_ : List[str] = s * (r - l) + l
lowerCamelCase_ : Tuple = s_bar.clamp(min=0.0 , max=1.0 )
lowerCamelCase_ : str = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
lowerCamelCase_ : Optional[Any] = os.path.join(
os.path.dirname(A__ ) , f'''bertarized_{os.path.basename(A__ )}''' )
if not os.path.isdir(A__ ):
shutil.copytree(A__ , A__ )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(A__ , os.path.join(A__ , """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
        'For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
        help='Folder where the pruned ("bertarized") model will be saved (defaults to "bertarized_<model_name>" next to the source model)',
)
snake_case__ : Optional[int] = parser.parse_args()
    __lowerCamelCase(args)
| 721
|
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : int ) ->None:
lowerCamelCase_ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
lowerCamelCase_ : str = False
def _lowerCAmelCase ( self : List[Any] , __a : list[str] ) ->None:
for word in words:
self.insert(__a )
def _lowerCAmelCase ( self : Tuple , __a : str ) ->None:
lowerCamelCase_ : int = self
for char in word:
if char not in curr.nodes:
lowerCamelCase_ : Optional[Any] = TrieNode()
lowerCamelCase_ : Union[str, Any] = curr.nodes[char]
lowerCamelCase_ : int = True
def _lowerCAmelCase ( self : Tuple , __a : str ) ->bool:
lowerCamelCase_ : Any = self
for char in word:
if char not in curr.nodes:
return False
lowerCamelCase_ : Any = curr.nodes[char]
return curr.is_leaf
def _lowerCAmelCase ( self : Tuple , __a : str ) ->None:
def _delete(__a : TrieNode , __a : str , __a : int ) -> bool:
if index == len(__a ):
# If word does not exist
if not curr.is_leaf:
return False
lowerCamelCase_ : Tuple = False
return len(curr.nodes ) == 0
lowerCamelCase_ : Dict = word[index]
lowerCamelCase_ : Dict = curr.nodes.get(__a )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
lowerCamelCase_ : Any = _delete(__a , __a , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , __a , 0 )
def __lowerCamelCase ( A__ : TrieNode , A__ : str ) -> None:
if node.is_leaf:
print(A__ , end=""" """ )
for key, value in node.nodes.items():
print_words(A__ , word + key )
def __lowerCamelCase ( ) -> bool:
lowerCamelCase_ : List[Any] = """banana bananas bandana band apple all beast""".split()
lowerCamelCase_ : Optional[int] = TrieNode()
root.insert_many(A__ )
# print_words(root, "")
assert all(root.find(A__ ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def __lowerCamelCase ( A__ : str , A__ : bool ) -> None:
print(str(A__ ) , """works!""" if passes else """doesn't work :(""" )
def __lowerCamelCase ( ) -> None:
assert test_trie()
def __lowerCamelCase ( ) -> None:
print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
| 171
| 0
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class A_ :
def __init__( self : Optional[int] , snake_case__ : Any , snake_case__ : List[Any]=13 , snake_case__ : Union[str, Any]=7 , snake_case__ : Any=True , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[Any]=True , snake_case__ : str=True , snake_case__ : Optional[int]=99 , snake_case__ : List[Any]=32 , snake_case__ : List[Any]=2 , snake_case__ : Union[str, Any]=4 , snake_case__ : int=37 , snake_case__ : str="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : Dict=5_12 , snake_case__ : Tuple=16 , snake_case__ : str=2 , snake_case__ : Dict=0.02 , snake_case__ : List[str]=3 , snake_case__ : List[Any]=4 , snake_case__ : Optional[Any]=None , ):
lowercase = parent
lowercase = 13
lowercase = 7
lowercase = True
lowercase = True
lowercase = True
lowercase = True
lowercase = 99
lowercase = 32
lowercase = 2
lowercase = 4
lowercase = 37
lowercase = """gelu"""
lowercase = 0.1
lowercase = 0.1
lowercase = 5_12
lowercase = 16
lowercase = 2
lowercase = 0.02
lowercase = 3
lowercase = 4
lowercase = None
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] ):
lowercase = TFRoFormerModel(config=snake_case__ )
lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase = [input_ids, input_mask]
lowercase = model(snake_case__ )
lowercase = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : List[Any] ):
lowercase = True
lowercase = TFRoFormerForCausalLM(config=snake_case__ )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(snake_case__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ):
lowercase = TFRoFormerForMaskedLM(config=snake_case__ )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Any , snake_case__ : Optional[int] ):
lowercase = self.num_labels
lowercase = TFRoFormerForSequenceClassification(config=snake_case__ )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Any ):
lowercase = self.num_choices
lowercase = TFRoFormerForMultipleChoice(config=snake_case__ )
lowercase = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowercase = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Dict ):
lowercase = self.num_labels
lowercase = TFRoFormerForTokenClassification(config=snake_case__ )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Any ):
lowercase = TFRoFormerForQuestionAnswering(config=snake_case__ )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowercase = self.prepare_config_and_inputs()
        (
            lowercase,
            lowercase,
            lowercase,
            lowercase,
            lowercase,
            lowercase,
            lowercase,
        ) = config_and_inputs
lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class A_ ( __a , __a , unittest.TestCase ):
_A :Optional[int] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
_A :List[str] = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
_A :List[Any] = False
_A :Optional[Any] = False
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : int , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : int ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = TFRoFormerModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowercase = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(snake_case__ )
@require_tf
class A_ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase = model(snake_case__ )[0]
# TODO Replace vocab size
lowercase = 5_00_00
lowercase = [1, 6, vocab_size]
self.assertEqual(output.shape , snake_case__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1E-4 )
@require_tf
class A_ ( unittest.TestCase ):
_A :Dict = 1E-4
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowercase = tf.constant([[4, 10]] )
lowercase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase = emba(input_ids.shape )
lowercase = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(snake_case__ , snake_case__ , atol=self.tolerance )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowercase = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
lowercase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
lowercase = emba.weight[:3, :5]
tf.debugging.assert_near(snake_case__ , snake_case__ , atol=self.tolerance )
@require_tf
class A_ ( unittest.TestCase ):
_A :Tuple = 1E-4
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# 2,12,16,64
lowercase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
lowercase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
lowercase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase = embed_positions([2, 16, 7_68] )[None, None, :, :]
lowercase , lowercase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
snake_case__ , snake_case__ , snake_case__ )
lowercase = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
lowercase = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , snake_case__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , snake_case__ , atol=self.tolerance )
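# Reference sketch of the sinusoidal table the embedding tests above check,
# assuming the standard layout their expected rows imply: first half sine,
# second half cosine over geometrically spaced frequencies.
def _sinusoidal_table_sketch(num_positions: int, dim: int):
    import numpy as np

    half = dim // 2
    freq = 1.0 / (10_000 ** (np.arange(half) / half))
    angles = np.arange(num_positions)[:, None] * freq[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)

# _sinusoidal_table_sketch(6, 6)[0] -> [0, 0, 0, 1, 1, 1]
# _sinusoidal_table_sketch(6, 6)[1] -> [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]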
| 428
|
import copy
import random
from transformers import CLIPTokenizer
class A_ ( __a ):
def __init__( self : Tuple , *snake_case__ : Any , **snake_case__ : Tuple ):
super().__init__(*snake_case__ , **snake_case__ )
lowercase = {}
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : Any , *snake_case__ : Tuple , **snake_case__ : str ):
lowercase = super().add_tokens(snake_case__ , *snake_case__ , **snake_case__ )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Any , *snake_case__ : int , snake_case__ : List[str]=1 , **snake_case__ : str ):
lowercase = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
else:
lowercase = []
for i in range(snake_case__ ):
lowercase = placeholder_token + F"""_{i}"""
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
lowercase = output
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : int , snake_case__ : int=False , snake_case__ : str=1.0 ):
if isinstance(snake_case__ , snake_case__ ):
lowercase = []
for i in range(len(snake_case__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=snake_case__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowercase = self.token_map[placeholder_token]
lowercase = tokens[: 1 + int(len(snake_case__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowercase = copy.copy(snake_case__ )
random.shuffle(snake_case__ )
lowercase = text.replace(snake_case__ , """ """.join(snake_case__ ) )
return text
def __call__( self : Optional[Any] , snake_case__ : str , *snake_case__ : Any , snake_case__ : Dict=False , snake_case__ : Any=1.0 , **snake_case__ : Any ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : List[Any] , *snake_case__ : Tuple , snake_case__ : Dict=False , snake_case__ : Optional[Any]=1.0 , **snake_case__ : Tuple ):
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
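# The core trick above, distilled: a multi-vector placeholder like "<dicoo>"
# is stored as several sub-tokens ("<dicoo>_0", "<dicoo>_1", ...) and expanded
# back into the prompt text before ordinary tokenization. A standalone sketch:
def _expand_placeholder(text: str, token_map: dict) -> str:
    for placeholder, subtokens in token_map.items():
        if placeholder in text:
            text = text.replace(placeholder, " ".join(subtokens))
    return text

assert _expand_placeholder(
    "a photo of <dicoo>", {"<dicoo>": ["<dicoo>_0", "<dicoo>_1"]}
) == "a photo of <dicoo>_0 <dicoo>_1"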
| 428
| 1
|
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class _A :
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = False):
a : int = scheduler
a : str = optimizers if isinstance(__UpperCAmelCase , (list, tuple)) else [optimizers]
a : Optional[Any] = split_batches
a : List[str] = step_with_optimizer
a : Any = GradientState()
def __snake_case ( self : Dict , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : int):
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase)
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase)
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
a : Union[str, Any] = AcceleratorState().num_processes
for _ in range(__UpperCAmelCase):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , "total_steps"):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase)
else:
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : str):
return self.scheduler.get_last_lr()
def __snake_case ( self : Union[str, Any]):
return self.scheduler.state_dict()
def __snake_case ( self : Tuple , __UpperCAmelCase : Optional[Any]):
self.scheduler.load_state_dict(__UpperCAmelCase)
def __snake_case ( self : List[Any]):
return self.scheduler.get_lr()
def __snake_case ( self : Any , *__UpperCAmelCase : str , **__UpperCAmelCase : List[Any]):
return self.scheduler.print_lr(*__UpperCAmelCase , **__UpperCAmelCase)
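# How this wrapper is obtained in practice, assuming the class above mirrors
# accelerate.scheduler.AcceleratedScheduler: it is rarely constructed by hand;
# Accelerator.prepare wraps a plain scheduler for you. A usage sketch:
def _prepare_scheduler_sketch():
    import torch
    from accelerate import Accelerator

    accelerator = Accelerator()
    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
    scheduler.step()  # steps once per real optimizer step; skipped steps are respected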
| 135
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowercase = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowercase = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def lowercase ( A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
a : str = SavedModel()
a : List[Any] = []
with open(os.path.join(A_ , "utils" , "tf_ops" , "onnx.json" ) ) as f:
a : Optional[int] = json.load(A_ )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(A_ )] )
with open(A_ , "rb" ) as f:
saved_model.ParseFromString(f.read() )
a : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
a : Union[str, Any] = sorted(A_ )
a : Dict = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(A_ )
if strict and len(A_ ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops) )
elif len(A_ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*A_ , sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
__lowercase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 135
| 1
|
a : Union[str, Any] = 2_5_6
# Modulus to hash a string
a : str = 1_0_0_0_0_0_3
def snake_case__ ( lowercase , lowercase ):
lowerCAmelCase_: List[str] = len(UpperCAmelCase__ )
lowerCAmelCase_: Any = len(UpperCAmelCase__ )
if p_len > t_len:
return False
lowerCAmelCase_: List[Any] = 0
lowerCAmelCase_: List[str] = 0
lowerCAmelCase_: Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase__ ):
lowerCAmelCase_: str = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_: Union[str, Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_: Optional[int] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase_: List[Any] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
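def _rolling_hash_demo() -> None:
    # Worked example of the rolling-hash update above, with small numbers so
    # the arithmetic is visible: base-10 digits, window length 3, no modulus.
    # The leading digit's weight (10**2) is what `modulus_power` tracks.
    old_hash = 415                            # hash("415") in base 10
    new_hash = (old_hash - 4 * 100) * 10 + 9  # drop '4', shift left, append '9'
    assert new_hash == 159                    # hash("159")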
def snake_case__ ( ):
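    # Test 1)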
lowerCAmelCase_: Optional[Any] = "abc1abc12"
lowerCAmelCase_: Tuple = "alskfjaldsabc1abc1abc12k23adsfabcabc"
lowerCAmelCase_: Union[str, Any] = "alskfjaldsk23adsfabcabc"
assert rabin_karp(UpperCAmelCase__ , UpperCAmelCase__ ) and not rabin_karp(UpperCAmelCase__ , UpperCAmelCase__ )
# Test 2)
lowerCAmelCase_: str = "ABABX"
lowerCAmelCase_: Tuple = "ABABZABABYABABX"
assert rabin_karp(UpperCAmelCase__ , UpperCAmelCase__ )
# Test 3)
lowerCAmelCase_: Tuple = "AAAB"
lowerCAmelCase_: Optional[int] = "ABAAAAAB"
assert rabin_karp(UpperCAmelCase__ , UpperCAmelCase__ )
# Test 4)
lowerCAmelCase_: List[str] = "abcdabcy"
lowerCAmelCase_: Tuple = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(UpperCAmelCase__ , UpperCAmelCase__ )
# Test 5)
lowerCAmelCase_: List[str] = "Lü"
lowerCAmelCase_: Optional[Any] = "Lüsai"
assert rabin_karp(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase_: Optional[int] = "Lue"
assert not rabin_karp(UpperCAmelCase__ , UpperCAmelCase__ )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 613
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
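
# Note: bfloat16 through ipex.optimize assumes a CPU with native BF16 support
# (e.g. AVX512-BF16 or AMX); on older hardware, dtype=torch.float32 is the
# safer choice for this script.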
| 483
| 0
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
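
# Illustrative run (assumes the {small,medium,large}_ft.pkl checkpoints sit in
# the current directory; the script name is a placeholder):
#   python convert_dialogpt_checkpoint.py --dialogpt_path .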
| 700
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Midpoint of the segment from p1 to p2."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
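
# Each recursion level triples the triangle count, so a depth of d draws 3**d
# smallest triangles; depths much beyond ~7 become slow under turtle graphics
# (a rough estimate, not a measured figure).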
| 338
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 210
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
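
# The six teacher layers [0, 2, 4, 7, 9, 11] are copied onto student layers
# 0..5, so the saved checkpoint initializes a 6-layer student for distillation.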
| 252
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
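
# Minimal usage sketch (the checkpoint id and input images are illustrative
# assumptions, not guaranteed to exist):
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=RePaintScheduler())
#   result = pipe(image=original_image, mask_image=mask, num_inference_steps=250).images[0]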
| 179
|
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (non-preemptive shortest job first)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
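
# Worked example for burst times [2, 5, 3, 7], all arriving at t=0: SJF runs
# the jobs in order 2, 3, 5, 7, so waiting_time == [0, 5, 2, 10] and
# turn_around_time == burst + waiting == [2, 10, 5, 17].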
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 179
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 108
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
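
# These wrappers follow the torch.hub hubconf.py convention: `dependencies`
# names the required packages, and each entry point simply forwards to the
# matching Auto* class's from_pretrained.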
| 108
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 701
|
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
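
# Example: get_week_day(2000, 1, 1) -> "Saturday". 2000 is a leap year, so the
# January anchor is DOOMSDAY_LEAP[0] = 4 (January 4, 2000 fell on that year's
# doomsday, a Tuesday), giving (2 + 1 - 4) % 7 == 6 -> "Saturday".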
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329
| 0
|
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_value=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
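
    # Note: apply_ocr=True requires the pytesseract package plus a system
    # Tesseract installation; pass apply_ocr=False to supply your own words
    # and bounding boxes instead.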
| 93
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator that applies a registered accelerate CpuOffload hook to an arbitrary
    function rather than `forward`.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
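
# Usage sketch (illustrative): decorate methods other than forward that should
# still trigger accelerate's offload pre_forward hook, e.g.
#
#   class MyAutoencoder(ModelMixin):
#       @apply_forward_hook
#       def encode(self, x):
#           ...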
| 648
| 0
|
"""simple docstring"""
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ = {'''input_ids''': [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 66
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
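    # Example invocation (added; the script filename and paths are hypothetical):
    #   python convert_xlm_checkpoint.py \
    #       --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
    #       --pytorch_dump_folder_path /path/to/output_dir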
| 66
| 1
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["""text""", """image""", """audio"""]
def create_inputs(input_types):
    """Create a dummy input for each requested input type."""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs):
    """Map each output to the name of its agent type."""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    '''simple docstring'''

    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_checks_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 474
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 34
| 0
|
def longest_common_subsequence(x: str, y: str):
    """Return the length of, and one instance of, the longest common subsequence of x and y."""
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    # backtrack through the table to recover one subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
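
# Illustration (added): for x = "AGGTAB" and y = "GXTXAYB" the DP table yields
# length 4 and the subsequence "GTAB", which the __main__ block below verifies.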
if __name__ == "__main__":
    a = """AGGTAB"""
    b = """GXTXAYB"""
    expected_ln = 4
    expected_subseq = """GTAB"""
    ln, subseq = longest_common_subsequence(a, b)
    assert ln == expected_ln
    assert subseq == expected_subseq
    print("""len =""", ln, """, sub-sequence =""", subseq)
    import doctest

    doctest.testmod()
| 720
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""")}), supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""examples""": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""")})}), supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""examples""": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    '''simple docstring'''
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]


def get_test_nested_examples():
    '''simple docstring'''
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
class BeamBuilderTest(TestCase):
    """simple docstring"""

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", F'''{builder.name}-train.arrow''')))
            self.assertDictEqual(builder.info.features, datasets.Features({"""content""": datasets.Value("""string""")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            self.assertDictEqual(dset["""train"""][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            with patch("""apache_beam.io.parquetio.WriteToParquet""") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, """default""", """0.0.0""", F'''{builder.name}-train-00000-of-00002.arrow''')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, """default""", """0.0.0""", F'''{builder.name}-train-00001-of-00002.arrow''')))
            self.assertDictEqual(builder.info.features, datasets.Features({"""content""": datasets.Value("""string""")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""]), sorted(["""foo""", """bar""", """foobar"""]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", F'''{builder.name}-train.arrow''')))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            self.assertDictEqual(dset["""train"""][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset
| 235
| 0
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
"""simple docstring"""
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])
def __len__( self : int ) -> Any:
return len(self.lengths )
    def check(self):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')
        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')
    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')
    def print_statistics(self):
if not self.params.is_master:
return
logger.info(f'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
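
# Minimal usage sketch (added; assumes a `params` namespace exposing special_tok_ids,
# mlm and max_model_input_size, and `sequences` as a list of integer np arrays):
#   dataset = LmSeqsDataset(params=params, data=sequences)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)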
| 459
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    # Runs in O(n + range) time, where range = max(a) - min(a) + 1.
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(n) for n in a))


if __name__ == "__main__":
    main()
| 459
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
_SCREAMING_SNAKE_CASE = """trajectory_transformer"""
_SCREAMING_SNAKE_CASE = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(
        self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249,
        action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128,
        embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512,
        initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True,
        pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
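
    # Usage sketch (added): attribute_map aliases common config names onto the GPT-style
    # ones defined above, e.g.:
    #   config = TrajectoryTransformerConfig()
    #   assert config.hidden_size == config.n_embd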
| 715
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 525
| 0
|
import numpy as np
def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))
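
# Example (added for illustration): sigmoid(np.array([0.0])) -> array([0.5]);
# large positive / negative inputs saturate toward 1.0 / 0.0 respectively.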
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1024,
}

MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']

LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>",
        do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs=None, **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the given target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by prepending the language prefix tokens and appending eos."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
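
    # Example (added): with tgt_lang="fr" the prefix tokens are [<lang:fr>], so a single
    # sequence is encoded as [<lang:fr>] + token_ids + [eos].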
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path, sp_model_kwargs):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path):
    with open(path, 'r') as f:
        return json.load(f)


def save_json(data, path):
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
| 114
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = 'file'
@pytest.fixture(scope='session')
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT, 'utf-8')
    with zstd.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), 'w') as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted', [True, False])
@pytest.mark.parametrize('default_cache_dir', [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', custom_extracted_dir)
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt')
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f'tmp://{tmpfs_file}')
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path('https://huggingface.co')


@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        http_get('https://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head('https://huggingface.co')


@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get('ftp://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head('ftp://huggingface.co')


@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get('s3://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head('s3://huggingface.co')
| 701
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
        position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DebertaVaModel,
            'fill-mask': DebertaVaForMaskedLM,
            'question-answering': DebertaVaForQuestionAnswering,
            'text-classification': DebertaVaForSequenceClassification,
            'token-classification': DebertaVaForTokenClassification,
            'zero-shot': DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')

        input_ids = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'''{output[:, 1:4, 1:4]}''')
| 444
| 0
|
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>",
        eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets,
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
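
    # Note (added): BlenderbotSmall does not use token type ids; the method above returns
    # a list of zeros matching the full special-token layout of the encoded pair.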
| 69
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        '''simple docstring'''
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 65
| 0
|
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator, dataset_size, batch_size, process_0_expected_batch_sizes, process_1_expected_batch_sizes,
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def _SCREAMING_SNAKE_CASE ( ):
A_ : str = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _SCREAMING_SNAKE_CASE ( ):
A_ : Optional[int] = create_accelerator(even_batches=__lowerCAmelCase )
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _SCREAMING_SNAKE_CASE ( ):
A_ : Optional[int] = create_accelerator(even_batches=__lowerCAmelCase )
A_ : int = torch.nn.Linear(1 , 1 )
A_ : Union[str, Any] = accelerator.prepare(__lowerCAmelCase )
A_ : Any = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
A_ : Union[str, Any] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCAmelCase ):
A_ : List[str] = ddp_model(batch[0].float() )
A_ : List[Any] = output.sum()
loss.backward()
batch_idxs.append(__lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
with warnings.catch_warnings(record=__lowerCAmelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowerCAmelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ):
A_ : str = True
A_ : Tuple = False
A_ : Any = create_accelerator(even_batches=__lowerCAmelCase )
A_ : Tuple = torch.nn.Linear(1 , 1 )
A_ : str = accelerator.prepare(__lowerCAmelCase )
A_ : Optional[int] = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
A_ : Optional[int] = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCAmelCase ):
A_ : Optional[int] = train_dl.batch_sampler.even_batches
A_ : Optional[Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ):
A_ : Optional[Any] = True
A_ : int = False
A_ : Optional[int] = create_accelerator(even_batches=__lowerCAmelCase )
A_ : List[Any] = torch.nn.Linear(1 , 1 )
A_ : int = accelerator.prepare(__lowerCAmelCase )
create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCAmelCase )
A_ : List[str] = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCAmelCase ):
A_ : int = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ):
A_ : Optional[int] = create_accelerator()
A_ : List[str] = torch.nn.Linear(1 , 1 )
A_ : Dict = accelerator.prepare(__lowerCAmelCase )
create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCAmelCase )
with warnings.catch_warnings(record=__lowerCAmelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCAmelCase ):
pass
assert issubclass(w[-1].category , __lowerCAmelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ):
A_ : Union[str, Any] = create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
A_ : List[str] = accelerator.state.distributed_type
A_ : Optional[int] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowerCAmelCase )
A_ : Any = original_state
if __name__ == "__main__":
main()
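
# --- Added illustration: the sharding arithmetic behind the assertions above,
# as a self-contained sketch. This is a simplified model of how Accelerate
# splits samples across two processes (the function name is ours, not an
# accelerate API); real sharding also depends on the dispatch mode.
def expected_batch_sizes(dataset_size, batch_size, num_processes, process_index, even_batches):
    import math
    if even_batches:
        # samples are padded so every process sees the same count
        per_process = math.ceil(dataset_size / num_processes)
    else:
        extra = 1 if process_index < dataset_size % num_processes else 0
        per_process = dataset_size // num_processes + extra
    full, rem = divmod(per_process, batch_size)
    return [batch_size] * full + ([rem] if rem else [])

# expected_batch_sizes(7, 2, 2, 0, even_batches=False) == [2, 2]
# expected_batch_sizes(7, 2, 2, 1, even_batches=False) == [2, 1]  (matches test_can_disable_even_batches)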
| 712
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
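
# --- Added illustration: the pattern above defers the heavy framework imports
# until an attribute is actually accessed. A minimal stand-alone version of the
# same idea (an assumed simplification; the real _LazyModule also handles
# submodules, __spec__, and error reporting):
#
#     import importlib
#     class _Lazy:
#         def __init__(self, package, structure):
#             self._package, self._structure = package, structure
#         def __getattr__(self, attr):
#             for module, symbols in self._structure.items():
#                 if attr in symbols:
#                     return getattr(importlib.import_module(f".{module}", self._package), attr)
#             raise AttributeError(attr)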
| 152
| 0
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
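
# --- Added illustration: the function computes V_bi = (kT/q) * ln(Nd * Na / ni^2).
# With the assumed, illustrative silicon-like values Nd = Na = 1e17 and
# ni = 1e10 (all in the same units, e.g. cm^-3), kT/q ~= 0.0259 V at 300 K and
# ln(1e14) ~= 32.24, so:
#
#     builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
#     # -> approximately 0.833 (volts)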
| 559
|
from __future__ import annotations
def pigeon_sort(array):
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""")
    unsorted = [int(x) for x in user_input.split(""",""")]
    print(pigeon_sort(unsorted))
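
# --- Added illustration: a worked trace on the (hypothetical) input
# [8, 3, 2, 7, 4]: _min=2, _max=8, so 7 holes are allocated, each value v is
# dropped into hole v - 2, and the holes are read back in index order:
#
#     pigeon_sort([8, 3, 2, 7, 4])  # -> [2, 3, 4, 7, 8]
#
# Note the memory cost is O(_max - _min): it is driven by the value range, not
# by the number of elements, which is why pigeonhole sort suits dense ranges.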
| 559
| 1
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored")
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}  # NOTE: distinct Unicode space characters that may render identically here
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        '''simple docstring'''
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        '''Converts a token (str) to an id (int) using the vocab.'''
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        '''Converts an index (int) to a token (str) using the vocab.'''
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        '''Returns the input string; overridden to remove the default clean-up.'''
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''Converts a sequence of tokens (strings) into a single string, keeping special tokens intact.'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        '''simple docstring'''
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        '''simple docstring'''
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
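
# --- Added illustration: a hypothetical round trip through the fast paths
# above (assumes a local SentencePiece model file; not part of the original
# module):
#
#     tok = GPTSw3Tokenizer("spiece.model")
#     ids = tok.encode_fast("Hej! Hur mår du?")  # preprocess_text, then raw SentencePiece ids
#     text = tok.decode_fast(ids)                # plain SentencePiece decode, no special-token logic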
| 711
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    '''simple docstring'''
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    title_col = soup.find_all("td", attrs="titleColumn")
    rating_col = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(title_col, rating_col)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    '''simple docstring'''
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
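
# --- Added illustration: write_movies() produces a two-column CSV; hypothetical
# rows would look like:
#
#     Movie title,IMDb rating
#     The Shawshank Redemption,9.3
#     The Godfather,9.2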
| 473
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFEsmModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_resize_token_embeddings(self):
        """simple docstring"""
        pass

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_save_load_after_resize_token_embeddings(self):
        """simple docstring"""
        pass

    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
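
# --- Added note: both integration tests pin only a 3x3 corner of the output
# tensor against hard-coded values, a common pattern for regression-testing
# pretrained weights. Sketch of the check (assumed shapes):
#
#     output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]   # (batch, seq, dim)
#     corner = output[:, :3, :3].numpy()
#     assert numpy.allclose(corner, expected_slice.numpy(), atol=1e-2)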
| 89
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
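
# --- Added illustration: zero-shot scores are a softmax over image-text
# similarity logits, which is why the tiny random model above yields ~0.333 for
# each of three labels. A minimal stand-in (assumed shapes, not the pipeline's
# internals):
def _softmax_scores(logits):
    import numpy as np
    e = np.exp(np.asarray(logits, dtype=float) - max(logits))
    return (e / e.sum()).tolist()

# _softmax_scores([0.0, 0.0, 0.0]) == [0.333..., 0.333..., 0.333...]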
| 354
| 0
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101) -> None:
        self.length = length

    def __len__(self) -> int:
        return self.length

    def __getitem__(self, i) -> int:
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            '''simple docstring'''
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    'Predictions and/or labels do not match expected results:\n  - predictions: '
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}")
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
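
# --- Added note: the success criterion above checks that distributed
# evaluation returns every sample exactly once and in the original order. A
# minimal model of the gather-and-truncate step (assumed behavior):
#
#     shards = [[0, 2, 4, 6], [1, 3, 5, 6]]   # per-process outputs; last sample padded
#     merged = [x for pair in zip(*shards) for x in pair][:7]
#     assert merged == [0, 1, 2, 3, 4, 5, 6]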
| 700
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']), sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
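
# --- Added illustration: in test_full_tokenizer above, "lower newer" becomes
# ['l','o','w','er','\u0120','n','e','w','er'] because, of the toy merges, only
# "e r" applies to these characters; the other merges all require the
# leading-space marker '\u0120' to pair with a letter ('\u0120 l', '\u0120l o',
# '\u0120lo w'), which never matches here, so '\u0120' is left as its own token.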
| 320
| 0
|
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
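
# --- Added check: kinetic_energy(10, 5) == 0.5 * 10 * 5**2 == 125.0 (joules if
# mass is in kg and velocity in m/s). The abs() calls make the result
# direction-independent, so kinetic_energy(10, -5) is also 125.0.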
| 255
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        '''simple docstring'''

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.dummy_cond_unet
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_A )
_SCREAMING_SNAKE_CASE =self.dummy_vae
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_SCREAMING_SNAKE_CASE =7_7
_SCREAMING_SNAKE_CASE =self.dummy_image.to(_A )
_SCREAMING_SNAKE_CASE =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline(
unet=_A , scheduler=_A , vae=_A , text_encoder=_A , tokenizer=_A , safety_checker=_A , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_A )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
_SCREAMING_SNAKE_CASE ='''A painting of a squirrel eating a burger'''
_SCREAMING_SNAKE_CASE =torch.Generator(device=_A ).manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe(
[prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_A , )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =torch.Generator(device=_A ).manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe(
[prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_A , return_dict=_A , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_SCREAMING_SNAKE_CASE =np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_fp16( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.dummy_cond_unet
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_A )
_SCREAMING_SNAKE_CASE =self.dummy_vae
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_SCREAMING_SNAKE_CASE =7_7
_SCREAMING_SNAKE_CASE =self.dummy_image.to(_A )
# put models in fp16
_SCREAMING_SNAKE_CASE =unet.half()
_SCREAMING_SNAKE_CASE =vae.half()
_SCREAMING_SNAKE_CASE =bert.half()
# make sure here that pndm scheduler skips prk
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline(
unet=_A , scheduler=_A , vae=_A , text_encoder=_A , tokenizer=_A , safety_checker=_A , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_A )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
_SCREAMING_SNAKE_CASE ='''A painting of a squirrel eating a burger'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe(
[prompt] , generator=_A , num_inference_steps=2 , output_type='''np''' , image=_A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
_SCREAMING_SNAKE_CASE =init_image.resize((7_6_0, 5_0_4) )
_SCREAMING_SNAKE_CASE ='''BAAI/AltDiffusion'''
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline.from_pretrained(
_A , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE ='''A fantasy landscape, trending on artstation'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =pipe(
prompt=_A , image=_A , strength=0.75 , guidance_scale=7.5 , generator=_A , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
_SCREAMING_SNAKE_CASE =image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
_SCREAMING_SNAKE_CASE =np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_SCREAMING_SNAKE_CASE =init_image.resize((7_6_8, 5_1_2) )
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
_SCREAMING_SNAKE_CASE ='''BAAI/AltDiffusion'''
_SCREAMING_SNAKE_CASE =AltDiffusionImgaImgPipeline.from_pretrained(
_A , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE ='''A fantasy landscape, trending on artstation'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =pipe(
prompt=_A , image=_A , strength=0.75 , guidance_scale=7.5 , generator=_A , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 255
| 1
|
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True )
class Protein:
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
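
# A minimal sketch (helper not part of the original module): constructing a
# placeholder Protein with the array shapes documented in the field comments
# above. The zeroed values are illustrative assumptions only.
def _example_protein(num_res: int = 2) -> Protein:
    num_atoms = residue_constants.atom_type_num
    return Protein(
        atom_positions=np.zeros((num_res, num_atoms, 3), dtype=np.float32),
        atom_mask=np.ones((num_res, num_atoms), dtype=np.float32),  # all atoms marked present
        aatype=np.zeros((num_res,), dtype=np.int32),  # all residues set to restype 0
        residue_index=np.arange(num_res),
        b_factors=np.zeros((num_res, num_atoms), dtype=np.float32),
    )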
def from_proteinnet_string( proteinnet_str: str ) -> Protein:
    tag_re = R'(\[[A-Z]+\]\n)'
    tags = [tag.strip() for tag in re.split(tag_re , proteinnet_str ) if len(tag ) > 0]
    groups = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq ) ):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = 'X'  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3 ):
                tertiary.append(list(map(float , g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions , atom_mask=atom_mask , aatype=aatype , residue_index=np.arange(len(aatype ) ) , b_factors=None , )
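
# Illustrative input (assumed, single residue; not from the original file): the
# ProteinNet text layout parsed by from_proteinnet_string above -- a [PRIMARY]
# one-letter sequence, three [TERTIARY] axis rows of N/CA/C coordinates in
# picometers, and one [MASK] +/- flag per residue.
_EXAMPLE_PROTEINNET_STR = (
    "[PRIMARY]\nG\n"
    "[TERTIARY]\n0 100 200\n0 100 200\n0 100 200\n"
    "[MASK]\n+\n"
)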
def get_pdb_headers( prot: Protein , chain_id: int = 0 ) -> List[str]:
    pdb_headers = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"""REMARK {remark}""" )
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents , parents_chain_index ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        parents = ['N/A']
    pdb_headers.append(f"""PARENT {' '.join(parents )}""" )
    return pdb_headers
def add_pdb_headers( prot: Protein , pdb_str: str ) -> str:
    out_pdb_lines = []
    lines = pdb_str.split('\n' )
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"""REMARK {remark}""" )
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(i ) , [] )
                parent_dict[str(i )].append(p )
            max_idx = max([int(chain_idx ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                chain_parents = parent_dict.get(str(i ) , ['N/A'] )
                parents_per_chain.append(chain_parents )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        parents_per_chain = [['N/A']]
    def make_parent_line(p ) -> str:
        return f"""PARENT {' '.join(p )}"""
    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    chain_counter = 0
    for i, l in enumerate(lines ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l )
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain ):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['N/A']
            out_pdb_lines.append(make_parent_line(chain_parents ) )
    return "\n".join(out_pdb_lines )
def to_pdb( prot: Protein ) -> str:
    restypes = residue_constants.restypes + ['X']
    def res_atoa(r ) -> str:
        return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.intaa )
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError('Invalid aatypes.' )
    headers = get_pdb_headers(prot )
    if len(headers ) > 0:
        pdb_lines.extend(headers )
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n ):
        res_name_a = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            record_type = 'ATOM'
            name = atom_name if len(atom_name ) == 4 else f""" {atom_name}"""
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''
            chain_tag = 'A'
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
                f"""{res_name_a:>3} {chain_tag:>1}"""
                f"""{residue_index[i]:>4}{insertion_code:>1}   """
                f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
                f"""{occupancy:>6.2f}{b_factor:>6.2f}          """
                f"""{element:>2}{charge:>2}"""
            )
            pdb_lines.append(atom_line )
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = 'TER'
            chain_termination_line = (
                f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
            )
            pdb_lines.append(chain_termination_line )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
    pdb_lines.append('END' )
    pdb_lines.append('' )
    return "\n".join(pdb_lines )
def ideal_atom_mask( prot: Protein ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction( features: FeatureDict , result: ModelOutput , b_factors = None , chain_index = None , remark = None , parents = None , parents_chain_index = None , ) -> Protein:
    return Protein(
        aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
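
# A minimal end-to-end sketch (inputs assumed, not from the original file):
# prot = from_prediction(features, result)  # dicts produced by the model
# pdb_str = to_pdb(prot)                    # REMARK/PARENT headers included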
| 109
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None ,metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None ,metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_28 ,metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } ,)
    doc_stride: int = field(
        default=1_28 ,metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} ,)
    max_query_length: int = field(
        default=64 ,metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } ,)
    max_answer_length: int = field(
        default=30 ,metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False ,metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 ,metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=20 ,metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 ,metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } ,)
    threads: int = field(default=1 ,metadata={"""help""": """multiple threads for converting example to features"""} )
class Split( Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , is_language_sensitive: Optional[bool] = False , cache_dir: Optional[str] = None , dataset_format: Optional[str] = "pt" , ) -> Optional[Any]:
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset' , None )
                self.examples = self.old_features.get('examples' , None )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
' future run' )
else:
if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
    def __len__( self ) -> int:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Union[str, Any] , snake_case : List[str] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A = self.features[i]
A = torch.tensor(feature.input_ids , dtype=torch.long )
A = torch.tensor(feature.attention_mask , dtype=torch.long )
A = torch.tensor(feature.token_type_ids , dtype=torch.long )
A = torch.tensor(feature.cls_index , dtype=torch.long )
A = torch.tensor(feature.p_mask , dtype=torch.float )
A = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
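# Usage sketch (paths and values hypothetical, not from the original file):
# args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
# dataset = SquadDataset(args, tokenizer, mode="train")
# batch = dataset[0]  # dict of tensors; keys depend on model_type, as handled above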
| 109
| 1
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """simple docstring"""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """simple docstring"""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """simple docstring"""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = F'''Open Knight Tour cannot be performed on a board of size {n}'''
    raise ValueError(msg)
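
# Usage sketch (board size assumed): n = 5 is the smallest side greater than 1
# that admits an open knight's tour; each cell of the returned board holds the
# move number at which the knight visited it.
# board = open_knight_tour(5)  # values 1..25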
if __name__ == "__main__":
import doctest
doctest.testmod()
| 445
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width is lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """simple docstring"""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cva.imwrite(f'''{file_root}.jpg''', new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''')
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj)
        with open(f'''{file_root}.txt''', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'''{label_name}.jpg''')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """simple docstring"""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uinta)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
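
# A minimal sketch (helper not in the original): the affine remap used by the
# quadrant branches above. A normalized coordinate v is squeezed into [0, scale]
# for the first half of an axis, or into [scale, 1] for the second half.
def _remap_coordinate(v: float, scale: float, second_half: bool) -> float:
    return scale + v * (1 - scale) if second_half else v * scale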
def random_chars(number_char: int) -> str:
    """simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
| 605
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swin"""] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_swin"""] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
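    # Usage note (illustrative): with the registration above, importing e.g.
    # SwinModel from this package resolves through _LazyModule, so the heavy
    # torch-backed module is only imported on first attribute access.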
| 701
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = SqueezeBertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , a : Any , a : Tuple , a : int , a : Union[str, Any] , a : Tuple , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = SqueezeBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , a : Union[str, Any] , a : Optional[Any] , a : str , a : Optional[Any] , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SqueezeBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : str , a : str , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , a : int , a : Any , a : Dict , a : Any , a : Tuple , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , a : Optional[int] , a : List[Any] , a : int , a : List[Any] , a : Union[str, Any] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*a )
    def test_squeezebert_for_masked_lm( self ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*a )
    def test_squeezebert_for_question_answering( self ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*a )
    def test_squeezebert_for_sequence_classification( self ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a )
    def test_squeezebert_for_token_classification( self ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*a )
    def test_squeezebert_for_multiple_choice( self ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a )
@slow
    def test_model_from_pretrained( self ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[int] = SqueezeBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowerCAmelCase__ : str = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
lowerCAmelCase__ : Any = model(a )[0]
lowerCAmelCase__ : Tuple = torch.Size((1, 3) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(a , a , atol=1E-4 ) )
| 69
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ):
if subparsers is not None:
        parser = subparsers.add_parser('''env''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
        '''--config_file''' , default=None , help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
        parser.set_defaults(func=env_command )
return parser
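# Usage sketch: on the command line, `accelerate env` (optionally with
# `--config_file path/to/config.yaml`) routes through this parser and prints
# the report assembled by env_command below.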
def env_command(args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
# Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
        'PyTorch XPU available': str(pt_xpu_available ),
        'PyTorch NPU available': str(pt_npu_available ),
'System RAM': F'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
    accelerate_config_str = (
        '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info['`Accelerate` configs'] = accelerate_config
return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 184
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_efficientnet"""] = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_efficientnet"""] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 266
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 591
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def test_stable_diffusion_flax( self ):
UpperCamelCase__ , UpperCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
UpperCamelCase__ = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ = jax.device_count()
UpperCamelCase__ = num_samples * [prompt]
UpperCamelCase__ = sd_pipe.prepare_inputs(_a )
UpperCamelCase__ = replicate(_a )
UpperCamelCase__ = shard(_a )
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(_a , jax.device_count() )
UpperCamelCase__ = sd_pipe(_a , _a , _a , num_inference_steps=25 , jit=_a )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase__ = images[0, 253:256, 253:256, -1]
UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase__ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_stable_diffusion_dpm_flax( self ):
UpperCamelCase__ = '''stabilityai/stable-diffusion-2'''
UpperCamelCase__ , UpperCamelCase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(_a , subfolder='''scheduler''' )
UpperCamelCase__ , UpperCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
_a , scheduler=_a , revision='''bf16''' , dtype=jnp.bfloataa , )
UpperCamelCase__ = scheduler_params
UpperCamelCase__ = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ = jax.device_count()
UpperCamelCase__ = num_samples * [prompt]
UpperCamelCase__ = sd_pipe.prepare_inputs(_a )
UpperCamelCase__ = replicate(_a )
UpperCamelCase__ = shard(_a )
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(_a , jax.device_count() )
UpperCamelCase__ = sd_pipe(_a , _a , _a , num_inference_steps=25 , jit=_a )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase__ = images[0, 253:256, 253:256, -1]
UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase__ = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 591
| 1
|
"""simple docstring"""
def get_data(source_data: list[list[float]] ) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score(data_lists: list[list[float]] , weights: list[int] ) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f'Invalid weight of {weight:f} provided'
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores(score_lists: list[list[float]] ) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]] , weights: list[int] ) -> list[list[float]]:
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
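
# Worked example (made-up data): two candidates scored on price (weight 0 =
# lower is better) and rating (weight 1 = higher is better); each row gains a
# trailing combined score.
# procentual_proximity([[20, 60], [23, 90]], [0, 1])
# -> [[20, 60, 1.0], [23, 90, 1.0]]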
| 96
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 96
| 1
|
'''simple docstring'''
import re
def dna(dna: str ):
    """simple docstring"""
    if len(re.findall("""[ATCG]""" , dna ) ) != len(dna ):
        raise ValueError("""Invalid Strand""" )
    return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 2_56,
}
class RemBertTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ):
'''simple docstring'''
return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ):
        '''simple docstring'''
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
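# Illustrative usage sketch (not part of the original module). "google/rembert"
# matches PRETRAINED_VOCAB_FILES_MAP above; the exact tokens produced depend on
# the released SentencePiece model, so treat the outputs as assumptions:
#
#     tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#     ids = tokenizer("Hello world")["input_ids"]        # wrapped in [CLS] ... [SEP]
#     tokens = tokenizer.convert_ids_to_tokens(ids)
#     tokenizer.save_pretrained("./rembert-tokenizer")   # copies sentencepiece.model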
| 460
| 0
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits ``x`` into sentences and rejoins them with one sentence per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 68
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
| 91
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        Performs autoregressive prediction of state, action and return from a
        sequence of states, actions and returns.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
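        # Note: the constant reward and random next state above are stand-ins that
        # keep the test deterministic; a real rollout would obtain both from an
        # actual `env.step(action)` call on the corresponding gym environment.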
| 703
|
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Renames HF Diffusers UNet keys to the original stable-diffusion layout."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
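# Worked example of the mapping above (assuming the standard SD 1.x layout):
# resnet-internal names are rewritten first, then the block prefix, e.g.
#   "down_blocks.0.resnets.1.norm1.weight"
#     -> "down_blocks.0.resnets.1.in_layers.0.weight"
#     -> "input_blocks.2.0.in_layers.0.weight"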
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF Linear weights to SD Conv2d weights by adding 1x1 spatial dims
    return w.reshape(*w.shape, 1, 1)
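# Example of the shape change: a diffusers attention projection stored as a
# (512, 512) Linear weight becomes a (512, 512, 1, 1) Conv2d weight, which is
# the layout the original SD VAE checkpoint expects for mid-block attention.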
def convert_vae_state_dict(vae_state_dict):
    """Renames HF Diffusers VAE keys to the original stable-diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # the single character "q", "k" or "v"
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
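# The concatenation above mirrors how OpenCLIP fuses attention projections: SD
# v2.x checkpoints store one in_proj tensor per layer, stacked in q, k, v order
# (see code2idx), e.g. three (1024, 1024) weights become one (3072, 1024) tensor.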
def convert_text_enc_state_dict(text_enc_dict):
    # v1.x text encoders need no renaming; return the state dict unchanged.
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights using safetensors; the default is a .ckpt file."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"

    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if they exist, otherwise from the PyTorch .bin files
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 78
| 0
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])

        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
                self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode('你好', add_special_tokens=False)
        text_2 = tokenizer.encode('你是谁', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                string_sequence = '你好,你是谁'
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 170
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
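# A worked example of the permutation (illustrative numbers): for a fused QKV
# weight with num_heads=2, hidden_size=4 per head and checkpoint version >= 2.0,
# a (2*3*4, d) = (24, d) tensor is viewed as (2, 3, 4, d), the head and split
# axes are swapped to (3, 2, 4, d), and the result is flattened back to (24, d),
# i.e. rows are regrouped from head-major to split(q/k/v)-major order.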
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
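# Example invocation (illustrative paths; the script name is assumed):
#   PYTHONPATH=/path/to/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/release/mp_rank_00/model_optim_rng.pt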
| 170
| 1
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 25
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
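# Example invocation (illustrative paths; the script name is assumed):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf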
| 25
| 1
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the sequence of random draws made along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model to do y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
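# Minimal sketch of the checkpointing API the tests below exercise (directory
# names are illustrative):
#
#     model = DummyModel()
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#     accelerator = Accelerator(
#         project_dir="ckpts", project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
#     )
#     model, optimizer = accelerator.prepare(model, optimizer)
#     accelerator.save_state()                                  # -> ckpts/checkpoints/checkpoint_0
#     accelerator.load_state("ckpts/checkpoints/checkpoint_0")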
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
def _UpperCamelCase( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ : int = DummyModel()
a__ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__, a__ : List[str] = dummy_dataloaders()
a__ : int = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
a__ : int = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
a__, a__, a__, a__ : List[str] = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
((a__), (a__)) : Optional[Any] = model.a.item(), model.b.item()
a__ : List[Any] = optimizer.state_dict()
a__ : Union[str, Any] = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((a__), (a__)) : str = model.a.item(), model.b.item()
a__ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ : str = DummyModel()
a__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__, a__ : Optional[int] = dummy_dataloaders()
a__ : str = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase__ )
a__ : str = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
a__, a__, a__, a__ : str = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) )
((a__), (a__)) : int = model.a.item(), model.b.item()
a__ : List[str] = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[str] = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((a__), (a__)) : Any = model.a.item(), model.b.item()
a__ : Dict = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
        t = torch.tensor([1, 2, 3] )
        t1 = torch.tensor([2, 3, 4] )
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters() )
        accelerator = Accelerator()
        with self.assertRaises(ValueError ) as ve:
            accelerator.register_for_checkpointing(t , t1 , net , opt )
        message = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def _UpperCamelCase( self : str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ : List[Any] = DummyModel()
a__ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
a__ : List[Any] = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.99 )
a__, a__ : Optional[int] = dummy_dataloaders()
a__ : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
a__ : int = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
a__, a__, a__, a__, a__ : List[Any] = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
a__ : Optional[int] = scheduler.state_dict()
train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(lowerCamelCase__ , scheduler.state_dict() )
def _UpperCamelCase( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ : int = DummyModel()
a__ : int = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 )
# Train baseline
a__ : Tuple = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
a__ : Optional[Any] = accelerator.prepare(lowerCamelCase__ )
        # Save 11 states; with total_limit=2 only the last two checkpoints survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def _UpperCamelCase( self : Tuple ):
a__ : Union[str, Any] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCamelCase : Optional[Any] = """/tmp/accelerate/state_checkpointing"""
UpperCamelCase : Optional[int] = DummyModel()
UpperCamelCase : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
UpperCamelCase : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCamelCase , UpperCamelCase : Tuple = dummy_dataloaders()
UpperCamelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase : Union[str, Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model , optimizer , train_dataloader , valid_dataloader , scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model , optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["""params"""][0].device
        break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
        param_device = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
        param_device = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
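# The round-trip pattern exercised throughout this file, as a minimal sketch
# (illustrative standalone usage; `model` and `optimizer` are assumed to exist):
#
#     from accelerate import Accelerator
#     from accelerate.utils import ProjectConfiguration
#
#     accelerator = Accelerator(
#         project_dir="ckpts", project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
#     )
#     model, optimizer = accelerator.prepare(model, optimizer)
#     accelerator.save_state()                                  # -> ckpts/checkpoints/checkpoint_0
#     accelerator.load_state("ckpts/checkpoints/checkpoint_0")  # restores model/optimizer state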
| 37
|
from math import factorial, radians
def __magic_name__ ( angle_in_degrees , accuracy = 18 , rounded_values_count = 10):
    '''simple docstring'''
    # Simplify the angle to be between 360 and -360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b # One positive term and the next will be negative and so on...
        a += 2 # Increased by 2 for every term.
    return round(result , rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
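# Quick sanity check of the Maclaurin approximation above (a sketch; math.sin is
# only an external reference here and is not part of the original snippet):
#
#     >>> from math import sin, radians
#     >>> round(sin(radians(30.0)), 10)
#     0.5
#     >>> __magic_name__(30.0)
#     0.5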
| 250
| 0
|
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
snake_case = logging.getLogger(__name__)
def accuracy( out , labels ) -> int:
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset( dataset_path ) -> list:
    with open(dataset_path , encoding='''utf_8''' ) as f:
        reader = csv.reader(f )
        output = []
        next(reader ) # skip the first line
        for line in tqdm(reader ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ) -> list:
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont_a, cont_b, mc_label),
        ) in enumerate(dataset ):
            with_cont_a = [start_token] + story[:cap_length] + [delimiter_token] + cont_a[:cap_length] + [clf_token]
            with_cont_b = [start_token] + story[:cap_length] + [delimiter_token] + cont_b[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont_a )] = with_cont_a
            input_ids[i, 1, : len(with_cont_b )] = with_cont_b
            mc_token_ids[i, 0] = len(with_cont_a ) - 1
            mc_token_ids[i, 1] = len(with_cont_b ) - 1
            lm_labels[i, 0, : len(with_cont_a )] = with_cont_a
            lm_labels[i, 1, : len(with_cont_b )] = with_cont_b
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main( ) -> None:
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCAmelCase_ , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=lowerCAmelCase_ , default='''''' )
parser.add_argument('''--eval_dataset''' , type=lowerCAmelCase_ , default='''''' )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 )
parser.add_argument('''--num_train_epochs''' , type=lowerCAmelCase_ , default=3 )
parser.add_argument('''--train_batch_size''' , type=lowerCAmelCase_ , default=8 )
parser.add_argument('''--eval_batch_size''' , type=lowerCAmelCase_ , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowerCAmelCase_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=lowerCAmelCase_ , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=lowerCAmelCase_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=lowerCAmelCase_ , default=6.25E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=lowerCAmelCase_ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=lowerCAmelCase_ , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=lowerCAmelCase_ , default=0.01 )
parser.add_argument('''--lm_coef''' , type=lowerCAmelCase_ , default=0.9 )
parser.add_argument('''--n_valid''' , type=lowerCAmelCase_ , default=374 )
parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    n_gpu = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_snake_case = ['''_start_''', '''_delimiter_''', '''_classify_''']
_snake_case = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(lowerCAmelCase_ )
_snake_case = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
_snake_case = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(lowerCAmelCase_ ) )
model.to(lowerCAmelCase_ )
# Load and encode the datasets
def tokenize_and_encode(lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCAmelCase_ ) )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return obj
return [tokenize_and_encode(lowerCAmelCase_ ) for o in obj]
logger.info('''Encoding dataset...''' )
_snake_case = load_rocstories_dataset(args.train_dataset )
_snake_case = load_rocstories_dataset(args.eval_dataset )
_snake_case = (train_dataset, eval_dataset)
_snake_case = tokenize_and_encode(lowerCAmelCase_ )
# Compute the max input length for the Transformer
_snake_case = model.config.n_positions // 2 - 2
_snake_case = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_snake_case = min(lowerCAmelCase_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_snake_case = pre_process_datasets(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ )
_snake_case , _snake_case = tensor_datasets[0], tensor_datasets[1]
_snake_case = TensorDataset(*lowerCAmelCase_ )
_snake_case = RandomSampler(lowerCAmelCase_ )
_snake_case = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.train_batch_size )
_snake_case = TensorDataset(*lowerCAmelCase_ )
_snake_case = SequentialSampler(lowerCAmelCase_ )
_snake_case = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_snake_case = args.max_steps
_snake_case = args.max_steps // (len(lowerCAmelCase_ ) // args.gradient_accumulation_steps) + 1
else:
_snake_case = len(lowerCAmelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs
_snake_case = list(model.named_parameters() )
_snake_case = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
_snake_case = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
_snake_case = AdamW(lowerCAmelCase_ , lr=args.learning_rate , eps=args.adam_epsilon )
_snake_case = get_linear_schedule_with_warmup(
lowerCAmelCase_ , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCAmelCase_ )
if args.do_train:
_snake_case , _snake_case , _snake_case = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
_snake_case = 0
_snake_case = 0
_snake_case = tqdm(lowerCAmelCase_ , desc='''Training''' )
for step, batch in enumerate(lowerCAmelCase_ ):
_snake_case = tuple(t.to(lowerCAmelCase_ ) for t in batch )
_snake_case , _snake_case , _snake_case , _snake_case = batch
_snake_case = model(lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ )
_snake_case = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_snake_case = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_snake_case = '''Training loss: {:.2e} lr: {:.2e}'''.format(lowerCAmelCase_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_snake_case = model.module if hasattr(lowerCAmelCase_ , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_snake_case = os.path.join(args.output_dir , lowerCAmelCase_ )
_snake_case = os.path.join(args.output_dir , lowerCAmelCase_ )
torch.save(model_to_save.state_dict() , lowerCAmelCase_ )
model_to_save.config.to_json_file(lowerCAmelCase_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_snake_case = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_snake_case = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(lowerCAmelCase_ )
if args.do_eval:
model.eval()
_snake_case , _snake_case = 0, 0
_snake_case , _snake_case = 0, 0
for batch in tqdm(lowerCAmelCase_ , desc='''Evaluating''' ):
_snake_case = tuple(t.to(lowerCAmelCase_ ) for t in batch )
_snake_case , _snake_case , _snake_case , _snake_case = batch
with torch.no_grad():
_snake_case , _snake_case , _snake_case , _snake_case = model(
lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ )
_snake_case = mc_logits.detach().cpu().numpy()
_snake_case = mc_labels.to('''cpu''' ).numpy()
_snake_case = accuracy(lowerCAmelCase_ , lowerCAmelCase_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_snake_case = eval_loss / nb_eval_steps
_snake_case = eval_accuracy / nb_eval_examples
_snake_case = tr_loss / nb_tr_steps if args.do_train else None
_snake_case = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
_snake_case = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(lowerCAmelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCAmelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
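# Typical invocation of this fine-tuning script (a sketch; the script filename
# and dataset paths are placeholders, not defined anywhere in this file):
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train \
#     --do_eval \
#     --train_dataset "$ROCSTORIES_DIR/cloze_test_val__spring2016.csv" \
#     --eval_dataset "$ROCSTORIES_DIR/cloze_test_test__spring2016.csv" \
#     --output_dir ../log \
#     --train_batch_size 16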
| 404
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__( self , start , end , val , left=None , right=None ):
        """simple docstring"""
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ):
        """simple docstring"""
        return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class SegmentTree:
    def __init__( self , collection : Sequence , function ):
        """simple docstring"""
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )
    def update( self , i , val ):
        """simple docstring"""
        self._update_tree(self.root , i , val )
    def query_range( self , i , j ):
        """simple docstring"""
        return self._query_range(self.root , i , j )
    def _build_tree( self , start , end ):
        """simple docstring"""
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
    def _update_tree( self , node , i , val ):
        """simple docstring"""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )
    def _query_range( self , node , i , j ):
        """simple docstring"""
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )
    def traverse( self ):
        """simple docstring"""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 5_0)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
    print(arr.query_range(3, 4)) # 7 with operator.add (4 with max, 3 with min)
    print(arr.query_range(2, 2)) # 5 for all three functions
    print(arr.query_range(1, 3)) # 13 with operator.add (5 with max, 3 with min)
print()
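# Complexity note: _build_tree visits every element once, so construction is
# O(n); update and query_range each descend root-to-leaf paths of the balanced
# tree, so both run in O(log n) per call.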
| 404
| 1
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but it keeps the
# full vocab and merges files, and thus also results in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
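# Loading the published tiny checkpoint later is then just (a sketch, assuming
# the upload step above was actually run):
#
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#     tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")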
| 6
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    sample: torch.FloatTensor
class TransformerTemporalModel( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self ,a_ = 16 ,a_ = 88 ,a_ = None ,a_ = None ,a_ = 1 ,a_ = 0.0 ,a_ = 32 ,a_ = None ,a_ = False ,a_ = None ,a_ = "geglu" ,a_ = True ,a_ = True ,):
"""simple docstring"""
super().__init__()
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = attention_head_dim
lowerCAmelCase__ = num_attention_heads * attention_head_dim
lowerCAmelCase__ = in_channels
lowerCAmelCase__ = torch.nn.GroupNorm(num_groups=a_ ,num_channels=a_ ,eps=1e-6 ,affine=a_ )
lowerCAmelCase__ = nn.Linear(a_ ,a_ )
# 3. Define transformers blocks
lowerCAmelCase__ = nn.ModuleList(
[
BasicTransformerBlock(
a_ ,a_ ,a_ ,dropout=a_ ,cross_attention_dim=a_ ,activation_fn=a_ ,attention_bias=a_ ,double_self_attention=a_ ,norm_elementwise_affine=a_ ,)
for d in range(a_ )
] )
lowerCAmelCase__ = nn.Linear(a_ ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,a_=None ,a_=None ,a_=1 ,a_=None ,a_ = True ,):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = hidden_states.shape
lowerCAmelCase__ = batch_frames // num_frames
lowerCAmelCase__ = hidden_states
lowerCAmelCase__ = hidden_states[None, :].reshape(a_ ,a_ ,a_ ,a_ ,a_ )
lowerCAmelCase__ = hidden_states.permute(0 ,2 ,1 ,3 ,4 )
lowerCAmelCase__ = self.norm(a_ )
lowerCAmelCase__ = hidden_states.permute(0 ,3 ,4 ,2 ,1 ).reshape(batch_size * height * width ,a_ ,a_ )
lowerCAmelCase__ = self.proj_in(a_ )
# 2. Blocks
for block in self.transformer_blocks:
lowerCAmelCase__ = block(
a_ ,encoder_hidden_states=a_ ,timestep=a_ ,cross_attention_kwargs=a_ ,class_labels=a_ ,)
# 3. Output
lowerCAmelCase__ = self.proj_out(a_ )
lowerCAmelCase__ = (
hidden_states[None, None, :]
.reshape(a_ ,a_ ,a_ ,a_ ,a_ )
.permute(0 ,3 ,4 ,1 ,2 )
.contiguous()
)
lowerCAmelCase__ = hidden_states.reshape(a_ ,a_ ,a_ ,a_ )
lowerCAmelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=a_ )
| 193
| 0
|
"""simple docstring"""
import socket
def main( ) -> None:
    '''simple docstring'''
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_23_12
sock.connect((host, port) )
sock.send(b"Hello server!" )
with open("Received_file" , "wb" ) as out_file:
print("File opened" )
print("Receiving data..." )
while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
main()
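# A minimal companion server sketch for the client above (hypothetical, not part
# of the original snippet): it accepts one connection on the same port and
# streams a file back to the client.
#
#     import socket
#
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind((socket.gethostname(), 12312))
#     server.listen(1)
#     conn, addr = server.accept()
#     print(conn.recv(1024))              # b"Hello server!"
#     with open("file_to_send", "rb") as f:
#         conn.sendall(f.read())
#     conn.close()
#     server.close()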
| 150
|
"""simple docstring"""
import math
import unittest
def is_prime( number : int ) -> bool:
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
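# Why stepping by 6 works: every integer is 6k + r for some r in {0,1,2,3,4,5},
# and the cases r in {0,2,3,4} are divisible by 2 or 3, so any prime > 3 must be
# 6k - 1 or 6k + 1; only i and i + 2 need checking per step. A quick sanity
# check (illustrative, not part of the original test suite):
#
#     >>> [n for n in range(2, 50) if is_prime(n)]
#     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]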
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase):
'''simple docstring'''
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
        with self.assertRaises(AssertionError):
is_prime(-19)
self.assertFalse(
is_prime(0) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 150
| 1
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Any = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ : List[str] = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase_ : Dict = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class TaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=1_0_0 , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool("extra_id_" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCamelCase , )
return max_model_length
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self ):
        '''simple docstring'''
        return list(
            set(filter(lambda x : bool(re.search(r"<extra_id_\d+>" , x ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
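# Layout produced by build_inputs_with_special_tokens above (a sketch; the
# prefix token list is empty for this class, so only the eos token is appended):
#
#     single sequence:   token_ids_a + [eos]                       ->  A A A </s>
#     pair of sequences: token_ids_a + [eos] + token_ids_b + [eos] ->  A A </s> B B </s>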
| 489
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def __a ( self : Dict ):
'''simple docstring'''
return 3_2
@property
def __a ( self : Tuple ):
'''simple docstring'''
return 3_2
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim
@property
def __a ( self : Any ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return 1_0_0
@property
def __a ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
a__ = {
"in_channels": 8,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
a__ = UNetaDConditionModel(**lowerCamelCase )
return model
@property
def __a ( self : Optional[Any] ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __a ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
a__ = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self : str ):
'''simple docstring'''
a__ = self.dummy_unet
a__ = self.dummy_movq
a__ = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCamelCase , )
a__ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __a ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 ):
'''simple docstring'''
a__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
a__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase )
# create hint
a__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if str(lowerCamelCase ).startswith("mps" ):
a__ = torch.manual_seed(lowerCamelCase )
else:
a__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
a__ = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 6_4,
"width": 6_4,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def __a ( self : Any ):
'''simple docstring'''
a__ = "cpu"
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**lowerCamelCase )
a__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
a__ = pipe(**self.get_dummy_inputs(lowerCamelCase ) )
a__ = output.images
a__ = pipe(
**self.get_dummy_inputs(lowerCamelCase ) , return_dict=lowerCamelCase , )[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a__ = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests( unittest.TestCase ):
def __a ( self : List[Any] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ):
'''simple docstring'''
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" )
a__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
a__ = torch.from_numpy(np.array(lowerCamelCase ) ).float() / 255.0
a__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
a__ = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase )
a__ = KandinskyVaaControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
a__ = pipeline.to(lowerCamelCase )
pipeline.set_progress_bar_config(disable=lowerCamelCase )
a__ = "A robot, 4k photo"
a__ = torch.Generator(device="cuda" ).manual_seed(0 )
a__ , a__ = pipe_prior(
lowerCamelCase , generator=lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
a__ = torch.Generator(device="cuda" ).manual_seed(0 )
a__ = pipeline(
image_embeds=lowerCamelCase , negative_image_embeds=lowerCamelCase , hint=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=1_0_0 , output_type="np" , )
a__ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
| 489
| 1
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = """ssube/stable-diffusion-x4-upscaler-onnx"""
    def get_dummy_inputs( self , seed : int=0 ) -> Dict:
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : str ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : List[Any] = self.get_dummy_inputs()
UpperCAmelCase : List[Any] = pipe(**__snake_case ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def A ( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : str = self.get_dummy_inputs()
UpperCAmelCase : Dict = pipe(**__snake_case ).images
UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A ( self : List[str] ) -> str:
UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Any = self.get_dummy_inputs()
UpperCAmelCase : Any = pipe(**__snake_case ).images
UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Any = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A ( self : int ) -> Optional[int]:
UpperCAmelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase : Tuple = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : List[Any] = self.get_dummy_inputs()
UpperCAmelCase : int = pipe(**__snake_case ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Dict = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
UpperCAmelCase : Any = pipe(**__snake_case ).images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider( self ) -> str:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ) -> Any:
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def A ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase : Any = init_image.resize((128, 128) )
# using the PNDM scheduler by default
UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Tuple = '''A fantasy landscape, trending on artstation'''
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Any = pipe(
prompt=__snake_case , image=__snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=__snake_case , output_type='''np''' , )
UpperCAmelCase : str = output.images
UpperCAmelCase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def A ( self : str ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase : str = init_image.resize((128, 128) )
UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
UpperCAmelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Optional[Any] = '''A fantasy landscape, trending on artstation'''
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : List[Any] = pipe(
prompt=__snake_case , image=__snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=__snake_case , output_type='''np''' , )
UpperCAmelCase : int = output.images
UpperCAmelCase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : Optional[Any] = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 721
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__: Tuple = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class SCREAMING_SNAKE_CASE( PretrainedConfig ):
"""simple docstring"""
lowerCamelCase__ = """time_series_transformer"""
lowerCamelCase__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
    def __init__( self , prediction_length : Optional[int] = None , context_length : Optional[int] = None , distribution_output : str = "student_t" , loss : str = "nll" , input_size : int = 1 , lags_sequence : List[int] = [1, 2, 3, 4, 5, 6, 7] , scaling : Optional[Union[str, bool]] = "mean" , num_dynamic_real_features : int = 0 , num_static_categorical_features : int = 0 , num_static_real_features : int = 0 , num_time_features : int = 0 , cardinality : Optional[List[int]] = None , embedding_dimension : Optional[List[int]] = None , encoder_ffn_dim : int = 32 , decoder_ffn_dim : int = 32 , encoder_attention_heads : int = 2 , decoder_attention_heads : int = 2 , encoder_layers : int = 2 , decoder_layers : int = 2 , is_encoder_decoder : bool = True , activation_function : str = "gelu" , d_model : int = 64 , dropout : float = 0.1 , encoder_layerdrop : float = 0.1 , decoder_layerdrop : float = 0.1 , attention_dropout : float = 0.1 , activation_dropout : float = 0.1 , num_parallel_samples : int = 100 , init_std : float = 0.02 , use_cache : bool = True , **kwargs , ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
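# Worked example with the defaults above (a sketch): cardinality falls back to
# [0], so embedding_dimension = [min(50, (0 + 1) // 2)] = [0]; then
# _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2, and the encoder input
# feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.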
| 528
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly( poly : Sequence[float] , x : float ) -> float:
    """simple docstring"""
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly : Sequence[float] , x : float ) -> float:
    """simple docstring"""
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
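# Worked example for the constants above: poly = (0.0, 0.0, 5.0, 9.3, 7.0)
# encodes p(x) = 5.0*x**2 + 9.3*x**3 + 7.0*x**4, so at x = 10.0 both calls print
# 500.0 + 9300.0 + 70000.0 = 79800.0. Horner's scheme evaluates the polynomial
# as ((((7.0*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0), one multiply and one add per
# coefficient instead of recomputing each power of x.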
| 433
|
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
lowerCAmelCase = tmp_path / """file.csv"""
lowerCAmelCase = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = tmp_path / """malformed_file.csv"""
lowerCAmelCase = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = tmp_path / """csv_with_image.csv"""
    lowerCAmelCase = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def _snake_case ( _SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
lowerCAmelCase = tmp_path / """csv_with_label.csv"""
lowerCAmelCase = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = tmp_path / """csv_with_int_list.csv"""
lowerCAmelCase = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = Csv()
lowerCAmelCase = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_SCREAMING_SNAKE_CASE , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(_SCREAMING_SNAKE_CASE ) in record.message
for record in caplog.records )
@require_pil
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict ) -> Tuple:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f:
lowerCAmelCase = f.read().splitlines()[1]
lowerCAmelCase = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
lowerCAmelCase = csv._generate_tables([[csv_file_with_image]] )
lowerCAmelCase = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
lowerCAmelCase = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f:
lowerCAmelCase = f.read().splitlines()[1:]
lowerCAmelCase = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
lowerCAmelCase = csv._generate_tables([[csv_file_with_label]] )
lowerCAmelCase = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
lowerCAmelCase = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
    lowerCAmelCase = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x: [int(i ) for i in x.split()]} )
lowerCAmelCase = csv._generate_tables([[csv_file_with_int_list]] )
lowerCAmelCase = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
lowerCAmelCase = pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 433
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase__ :
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Tuple=13 , _lowercase : Dict=30 , _lowercase : List[str]=2 , _lowercase : Any=3 , _lowercase : List[str]=True , _lowercase : Any=True , _lowercase : Any=32 , _lowercase : List[Any]=5 , _lowercase : Optional[int]=4 , _lowercase : Optional[int]=37 , _lowercase : Tuple="gelu" , _lowercase : Any=0.1 , _lowercase : Dict=0.1 , _lowercase : str=10 , _lowercase : Dict=0.0_2 , _lowercase : List[Any]=3 , _lowercase : List[str]=None , _lowercase : Any=2 , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ = (image_size // patch_size) ** 2
UpperCAmelCase__ = num_patches + 2
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCAmelCase ( self : Union[str, Any] , _lowercase : str , _lowercase : int , _lowercase : Any ):
"""simple docstring"""
UpperCAmelCase__ = DeiTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase__ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self : List[Any] , _lowercase : Optional[int] , _lowercase : int , _lowercase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = DeiTForMaskedImageModeling(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase__ = model(_lowercase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = DeiTForMaskedImageModeling(_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(_lowercase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCAmelCase ( self : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = DeiTForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase__ = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = DeiTForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__= (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A__= (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A__= False
A__= False
A__= False
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = DeiTModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : str ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_lowercase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
def _UpperCAmelCase ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
def _UpperCAmelCase ( self : List[str] , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : int=False ):
"""simple docstring"""
UpperCAmelCase__ = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowercase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase__ = model_class(_lowercase )
model.to(_lowercase )
model.train()
UpperCAmelCase__ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCAmelCase__ = model(**_lowercase ).loss
loss.backward()
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ = False
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowercase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase__ = model_class(_lowercase )
model.gradient_checkpointing_enable()
model.to(_lowercase )
model.train()
UpperCAmelCase__ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCAmelCase__ = model(**_lowercase ).loss
loss.backward()
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowercase ),
*get_values(_lowercase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
UpperCAmelCase__ = problem_type["title"]
UpperCAmelCase__ = problem_type["num_labels"]
UpperCAmelCase__ = model_class(_lowercase )
model.to(_lowercase )
model.train()
UpperCAmelCase__ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if problem_type["num_labels"] > 1:
UpperCAmelCase__ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCAmelCase__ = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowercase ) as warning_list:
UpperCAmelCase__ = model(**_lowercase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = DeiTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def __UpperCAmelCase ( ) -> int:
'''simple docstring'''
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
_lowercase )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=_lowercase , return_tensors="pt" ).to(_lowercase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**_lowercase )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowercase )
UpperCAmelCase__ = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=_lowercase , return_tensors="pt" )
UpperCAmelCase__ = inputs.pixel_values.to(_lowercase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase__ = model(_lowercase )
| 277
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def __UpperCAmelCase ( __A = True , *__A , **__A ) -> Any:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
UpperCAmelCase__ = False
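    # show the bar only on the local main process; every other process gets a disabled tqdm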
if main_process_only:
        UpperCAmelCase__ = PartialState().local_process_index != 0
return _tqdm(*__A , **__A , disable=__A )
| 277
| 1
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCamelCase : List[Any] = logging.getLogger()
def __lowerCamelCase ():
    parser = argparse.ArgumentParser()
parser.add_argument("-f" )
    args = parser.parse_args()
return args.f
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
    results = {}
    path = os.path.join(UpperCAmelCase__ , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
else:
raise ValueError(F"can't find {path}" )
return results
def __lowerCamelCase ():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
_lowerCamelCase : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( a ):
@classmethod
def __snake_case( cls : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def __snake_case( cls : int ) -> Any:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "translation_no_trainer" ) ) )
@slow
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.1_0 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __snake_case( self : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE = get_results(_UpperCamelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , "image_classification_no_trainer" ) ) )
| 403
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCamelCase : Union[str, Any] = get_tests_dir('''fixtures''')
class lowercase ( unittest.TestCase ):
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = mock.Mock()
SCREAMING_SNAKE_CASE = 500
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = HTTPError
SCREAMING_SNAKE_CASE = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_UpperCamelCase ) as mock_head:
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This checks that we actually called the fake head request
mock_head.assert_called()
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class lowercase ( unittest.TestCase ):
@classmethod
def __snake_case( cls : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def __snake_case( cls : Dict ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def __snake_case( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id="test-feature-extractor" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __snake_case( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __snake_case( self : List[str] ) -> Tuple:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 403
| 1
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _a ( unittest.TestCase ):
'''simple docstring'''
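    # is_safetensors_compatible should return True only when the .safetensors files present
    # can fully replace the corresponding .bin weight files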
def __UpperCAmelCase( self ):
__A : int = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : Optional[int] = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : str = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : List[Any] = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
__A : int = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : List[str] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
__A : List[str] = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
__A : Optional[int] = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : Optional[Any] = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__A : Optional[Any] = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : List[Any] = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
__A : Union[str, Any] = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : Tuple = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
__A : Optional[int] = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __UpperCAmelCase( self ):
__A : List[Any] = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
__A : int = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
| 718
|
def lowerCamelCase_ ( _lowercase = 2_000_000 ) -> int:
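    # sieve of Eratosthenes: 0 marks "still possibly prime", 1 marks composite;
    # returns the sum of all primes strictly below n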
    n = _lowercase
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
| 387
| 0
|
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class _UpperCamelCase:
def __init__( self : str , SCREAMING_SNAKE_CASE__ : int = None ):
'''simple docstring'''
__a : List[str] = (
os.path.join(A_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__a : Tuple = Extractor
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
__a : Any = os.path.abspath(A_ )
return os.path.join(self.extract_dir , hash_url_to_filename(A_ ) )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(A_ ) and not (os.path.isdir(A_ ) and os.listdir(A_ ))
)
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict = False ):
'''simple docstring'''
__a : str = self.extractor.infer_extractor_format(A_ )
if not extractor_format:
return input_path
__a : Optional[Any] = self._get_output_path(A_ )
if self._do_extract(A_ , A_ ):
self.extractor.extract(A_ , A_ , A_ )
return output_path
class _UpperCamelCase( _UpperCAmelCase ):
@classmethod
@abstractmethod
def __lowerCAmelCase ( cls : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
...
class _UpperCamelCase( _UpperCAmelCase , _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = []
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
with open(A_ , 'rb' ) as f:
return f.read(A_ )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] = b"" ):
'''simple docstring'''
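        # read just enough leading bytes to cover the longest known magic number, then test each one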
if not magic_number:
__a : int = max(len(A_ ) for cls_magic_number in cls.magic_numbers )
try:
__a : Optional[Any] = cls.read_magic_number(A_ , A_ )
except OSError:
return False
return any(magic_number.startswith(A_ ) for cls_magic_number in cls.magic_numbers )
class _UpperCamelCase( _UpperCAmelCase ):
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
return tarfile.is_tarfile(A_ )
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
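        # filter out tar members whose paths or link targets would escape the extraction directory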
def resolved(SCREAMING_SNAKE_CASE__ : int ) -> str:
return os.path.realpath(os.path.abspath(A_ ) )
def badpath(SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(A_ , A_ ) ).startswith(A_ )
def badlink(SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> bool:
# Links are interpreted relative to the directory containing the link
__a : int = resolved(os.path.join(A_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=A_ )
__a : Any = resolved(A_ )
for finfo in members:
if badpath(finfo.name , A_ ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(A_ , A_ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(A_ , A_ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
os.makedirs(A_ , exist_ok=A_ )
__a : Tuple = tarfile.open(A_ )
tar_file.extractall(A_ , members=TarExtractor.safemembers(A_ , A_ ) )
tar_file.close()
class _UpperCamelCase( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = [B'''\x1F\x8B''']
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
with gzip.open(A_ , 'rb' ) as gzip_file:
with open(A_ , 'wb' ) as extracted_file:
shutil.copyfileobj(A_ , A_ )
class _UpperCamelCase( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] = b"" ):
'''simple docstring'''
if super().is_extractable(A_ , magic_number=A_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(A_ , 'rb' ) as fp:
__a : int = _EndRecData(A_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__a : List[Any] = fp.read(A_ ) # CD is where we expect it to be
if len(A_ ) == sizeCentralDir:
__a : Any = struct.unpack(A_ , A_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
os.makedirs(A_ , exist_ok=A_ )
with zipfile.ZipFile(A_ , 'r' ) as zip_file:
zip_file.extractall(A_ )
zip_file.close()
class _UpperCamelCase( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
with lzma.open(A_ ) as compressed_file:
with open(A_ , 'wb' ) as extracted_file:
shutil.copyfileobj(A_ , A_ )
class _UpperCamelCase( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(A_ , exist_ok=A_ )
__a : Union[str, Any] = rarfile.RarFile(A_ )
rf.extractall(A_ )
rf.close()
class _UpperCamelCase( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
__a : List[Any] = zstd.ZstdDecompressor()
with open(A_ , 'rb' ) as ifh, open(A_ , 'wb' ) as ofh:
dctx.copy_stream(A_ , A_ )
class _UpperCamelCase( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = [B'''\x42\x5A\x68''']
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
with bza.open(A_ , 'rb' ) as compressed_file:
with open(A_ , 'wb' ) as extracted_file:
shutil.copyfileobj(A_ , A_ )
class _UpperCamelCase( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import pyazr
os.makedirs(A_ , exist_ok=A_ )
with pyazr.SevenZipFile(A_ , 'r' ) as archive:
archive.extractall(A_ )
class _UpperCamelCase( _UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = [B'''\x04\x22\x4D\x18''']
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lza.frame
with lza.frame.open(A_ , 'rb' ) as compressed_file:
with open(A_ , 'wb' ) as extracted_file:
shutil.copyfileobj(A_ , A_ )
class _UpperCamelCase:
    # Put the zip file last, b/c files can be wrongly detected as zip (or possibly as tar or gzip)
__SCREAMING_SNAKE_CASE : Tuple = {
'''tar''': TarExtractor,
'''gzip''': GzipExtractor,
'''zip''': ZipExtractor,
'''xz''': XzExtractor,
'''rar''': RarExtractor,
'''zstd''': ZstdExtractor,
'''bz2''': BzipaExtractor,
'''7z''': SevenZipExtractor, # <Added version="2.4.0"/>
'''lz4''': LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCAmelCase ( cls : Tuple ):
'''simple docstring'''
return max(
len(A_ )
for extractor in cls.extractors.values()
if issubclass(A_ , A_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(A_ , magic_number_length=A_ )
except OSError:
return b""
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = False ):
'''simple docstring'''
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=A_ , )
__a : Optional[int] = cls.infer_extractor_format(A_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCAmelCase ( cls : Any , SCREAMING_SNAKE_CASE__ : int ): # <Added version="2.4.0"/>
'''simple docstring'''
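        # probe the file's magic number once and return the first registered format that recognizes it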
__a : int = cls._get_magic_number_max_length()
__a : List[Any] = cls._read_magic_number(A_ , A_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(A_ , magic_number=A_ ):
return extractor_format
@classmethod
def __lowerCAmelCase ( cls : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] = None , SCREAMING_SNAKE_CASE__ : List[str] = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(A_ ) , exist_ok=A_ )
# Prevent parallel extractions
__a : Union[str, Any] = str(Path(A_ ).with_suffix('.lock' ) )
with FileLock(A_ ):
shutil.rmtree(A_ , ignore_errors=A_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(A_ , A_ ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=A_ , )
__a : Tuple = extractor if extractor != '''deprecated''' else extractor_format
else:
__a : Dict = cls.extractors[extractor_format]
return extractor.extract(A_ , A_ )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=A_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(A_ ):
return extractor.extract(A_ , A_ )
| 47
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 492
| 0
|
UpperCAmelCase_ = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
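# Illustrative lookup (an assumption, not part of the original table): each value
# above is a full pip requirement string, so a version specifier can be recovered
# by stripping the leading distribution name, e.g.:
#
#   import re
#   spec = re.sub(r"^[A-Za-z0-9_.\-\[\]]+", "", UpperCAmelCase_["tokenizers"])
#   # spec == ">=0.11.1,!=0.11.3,<0.14"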
| 476
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 476
| 1
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
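# Note (illustrative): torch.float16 weights assume a CUDA device. On a
# CPU-only machine, loading in full precision is the safer variant:
#   pipe = StableDiffusionPipeline.from_pretrained(model_id)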
| 49
|
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = 32

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the class embeddings are the image embedding concatenated with its
            # noise-augmented counterpart, hence the factor of 2
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        # torch.manual_seed(0) is re-applied before each component so that every
        # dummy module is initialized deterministically on its own.
        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 49
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
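# Round-trip sanity check (illustrative sketch, not part of the original file):
# the two helpers above are inverses up to 8-bit quantization of the input.
#
#   img = torch.rand(2, 3, 16, 16)   # RGB batch in [0, 1]
#   bits = decimal_to_bits(img)      # (2, 24, 16, 16), values in {-1, 1}
#   recon = bits_to_decimal(bits)    # back to [0, 1], in 1/255 steps
#   assert torch.allclose((img * 255).int() / 255, recon)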
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        import types  # local import: used only to bind the patched step function below

        super().__init__()
        self.bit_scale = bit_scale
        # patch the scheduler with the matching bit-space step function; binding it
        # as a method makes `self.scheduler.step(...)` pass the scheduler as `self`
        scheduler.step = types.MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step, scheduler
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
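# Usage sketch (illustrative; the checkpoint path below is a placeholder, not a
# real model id):
#
#   unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler()).to("cuda")
#   image = pipe(height=64, width=64, num_inference_steps=50).images[0]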
| 710
|
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 14
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowercase_: int = logging.get_logger(__name__)
lowercase_: Optional[int] = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880, hidden_size=64, n_layer=2, n_head=8,
        layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True,
        bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
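# Example usage sketch (illustrative; the tiny sizes mirror the defaults above):
#
#   config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
#   onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
#   onnx_config.inputs  # OrderedDict with input_ids, past_key_values.* and attention_mask axes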
| 648
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowercase_: int = logging.get_logger(__name__)
lowercase_: Optional[int] = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880, hidden_size=64, n_layer=2, n_head=8,
        layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True,
        bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 648
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0,
        eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
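# Example (illustrative): the xpath-related sizes above bound the depth and
# width of the DOM paths the model can embed.
#
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#   config.max_xpath_tag_unit_embeddings  # -> 256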
| 494
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        # mean per-token cross-entropy times the label length gives the
        # negative total log-likelihood of the label sequence
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 494
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", classifier_dropout=None,
        channel_shrink_ratio=4, max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
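# Example (illustrative): the last two fields are what distinguish this config
# from a plain text-encoder config; 2D layout coordinates get their own
# embedding table, shrunk by channel_shrink_ratio.
#
#   config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)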
| 8
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2,
        adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True,
        languages=("en_XX",), default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
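# Example (illustrative): for the default task, the inputs property above yields
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])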
| 265
| 0
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'spiece.model'}
UpperCamelCase = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
UpperCamelCase = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any="<unk>" , lowerCamelCase__ : Optional[Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Optional[Any]="<pad>" , lowerCamelCase__ : int="[SEP]" , lowerCamelCase__ : str="[MASK]" , lowerCamelCase__ : str="[CLS]" , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : List[str] , ):
"""simple docstring"""
__UpperCamelCase : str = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
__UpperCamelCase : int = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
__UpperCamelCase : str = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
__UpperCamelCase : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
__UpperCamelCase : List[Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
__UpperCamelCase : Any = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase : List[Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
__UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
__UpperCamelCase : Tuple = vocab_file
__UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def a ( self : int ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def a ( self : Dict ):
"""simple docstring"""
__UpperCamelCase : Tuple = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
"""simple docstring"""
__UpperCamelCase : str = self.__dict__.copy()
__UpperCamelCase : Optional[Any] = None
return state
def __setstate__( self : List[str] , lowerCamelCase__ : int ):
"""simple docstring"""
__UpperCamelCase : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCamelCase : Tuple = {}
__UpperCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a ( self : List[Any] , lowerCamelCase__ : List[Any] ):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def a ( self : List[Any] , lowerCamelCase__ : int ):
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCAmelCase_ )
def a ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : List[Any] = self.sp_model.IdToPiece(lowerCAmelCase_ )
return token
def a ( self : Dict , lowerCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = []
__UpperCamelCase : str = """"""
__UpperCamelCase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
__UpperCamelCase : str = True
__UpperCamelCase : List[str] = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
__UpperCamelCase : Dict = False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def a ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] = False , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : List[Any] = True , **lowerCamelCase__ : str , ):
"""simple docstring"""
__UpperCamelCase : str = kwargs.pop("""use_source_tokenizer""" , lowerCAmelCase_ )
__UpperCamelCase : str = self.convert_ids_to_tokens(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__UpperCamelCase : Optional[int] = []
__UpperCamelCase : Optional[int] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
__UpperCamelCase : Tuple = []
sub_texts.append(lowerCAmelCase_ )
else:
current_sub_text.append(lowerCAmelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__UpperCamelCase : int = re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(lowerCAmelCase_ ) )
else:
__UpperCamelCase : Optional[int] = """""".join(lowerCAmelCase_ )
__UpperCamelCase : List[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__UpperCamelCase : Union[str, Any] = self.clean_up_tokenization(lowerCAmelCase_ )
return clean_text
else:
return text
def a ( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase : Dict = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , """wb""" ) as fi:
__UpperCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
def a ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase : Union[str, Any] = [self.cls_token_id]
__UpperCamelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] = None , lowerCamelCase__ : Dict = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def a ( self : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int = None ):
"""simple docstring"""
__UpperCamelCase : Tuple = [self.sep_token_id]
__UpperCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
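    # Formats produced by the three methods above (illustrative): a single
    # sequence becomes [CLS] X [SEP]; a pair becomes [CLS] A [SEP] B [SEP],
    # with token type ids 0 for the first segment (including its [SEP]) and 1
    # for the second.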
| 719
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 515
| 0
|
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator

    # strip the trailing separator left by the loop above
    return joined.strip(separator)
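# Hedged usage example (mirrors Python's str.join with the argument order
# swapped; values here are illustrative):
#
#   join("_", ["a", "b", "c"])             # -> "a_b_c"
#   join(" ", ["You", "are", "amazing!"])  # -> "You are amazing!"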
if __name__ == "__main__":
from doctest import testmod
testmod()
| 63
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = PriorTransformer
lowerCAmelCase__ : int = """hidden_states"""
@property
def A ( self ) -> Tuple:
a_ : Union[str, Any] = 4
a_ : Tuple = 8
a_ : Dict = 7
a_ : Dict = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
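# Shapes implied by the constants above: hidden_states and proj_embedding are
# (batch_size=4, embedding_dim=8); encoder_hidden_states is (4, num_embeddings=7, 8).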
def A ( self , _SCREAMING_SNAKE_CASE=0 ) -> int:
torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : int = 4
a_ : Dict = 8
a_ : Dict = 7
a_ : Dict = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def A ( self ) -> Optional[Any]:
return (4, 8)
@property
def A ( self ) -> Any:
return (4, 8)
def A ( self ) -> List[str]:
a_ : List[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
a_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def A ( self ) -> Dict:
a_ , a_ : str = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def A ( self ) -> str:
a_ , a_ : str = self.prepare_init_args_and_inputs_for_common()
a_ : List[Any] = self.model_class(**_SCREAMING_SNAKE_CASE )
a_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : List[Any] = [*signature.parameters.keys()]
a_ : Any = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , _SCREAMING_SNAKE_CASE )
def A ( self ) -> Any:
a_ : Tuple = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
a_ : Union[str, Any] = model.to(_SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , "set_default_attn_processor" ):
model.set_default_attn_processor()
a_ : List[Any] = self.get_dummy_seed_input()
with torch.no_grad():
a_ : Dict = model(**_SCREAMING_SNAKE_CASE )[0]
a_ : Any = output[0, :5].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
a_ : Optional[int] = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-2 ) )
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A ( self , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=7_6_8 , _SCREAMING_SNAKE_CASE=7_7 , _SCREAMING_SNAKE_CASE=0 ) -> Optional[int]:
torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : List[str] = batch_size
a_ : Optional[Any] = embedding_dim
a_ : Tuple = num_embeddings
a_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : int = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def A ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[3_7, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
a_ : List[Any] = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(_SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = self.get_dummy_seed_input(seed=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
a_ : int = model(**_SCREAMING_SNAKE_CASE )[0]
assert list(sample.shape ) == [1, 7_6_8]
a_ : str = sample[0, :8].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
a_ : int = torch.tensor(_SCREAMING_SNAKE_CASE )
assert torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
| 473
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
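# Illustrative effect of the `_LazyModule` pattern above (import path assumed):
#   from transformers.models.opt import OPTModel
# The heavy torch-backed module is only imported when the attribute is first
# accessed, so `import transformers` itself stays fast.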
| 708
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class A_ :
'''simple docstring'''
__snake_case = 42 # [batch_size x 3]
__snake_case = 42 # [batch_size x 3]
__snake_case = 42 # [batch_size x 3]
__snake_case = 42 # [batch_size x 3]
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
def _snake_case ( self: str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _snake_case ( self: Dict ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _snake_case ( self: List[str] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _snake_case ( self: Dict ):
__lowerCamelCase : Any = torch.arange(self.height * self.width )
__lowerCamelCase : List[str] = torch.stack(
[
pixel_indices % self.width,
torch.div(a , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def _snake_case ( self: Optional[int] ):
__lowerCamelCase , *__lowerCamelCase : int = self.shape
__lowerCamelCase : Optional[Any] = int(np.prod(a ) )
__lowerCamelCase : Dict = self.get_image_coords()
__lowerCamelCase : Optional[Any] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__lowerCamelCase : Tuple = self.get_camera_rays(a )
__lowerCamelCase : Union[str, Any] = rays.view(a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _snake_case ( self: Optional[Any] , a: torch.Tensor ):
__lowerCamelCase , *__lowerCamelCase , __lowerCamelCase : Union[str, Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__lowerCamelCase : Union[str, Any] = coords.view(a , -1 , 2 )
__lowerCamelCase : Dict = self.resolution()
__lowerCamelCase : List[Any] = self.fov()
__lowerCamelCase : str = (flat.float() / (res - 1)) * 2 - 1
__lowerCamelCase : Union[str, Any] = fracs * torch.tan(fov / 2 )
__lowerCamelCase : Dict = fracs.view(a , -1 , 2 )
__lowerCamelCase : Dict = (
self.z.view(a , 1 , 3 )
+ self.x.view(a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(a , 1 , 3 ) * fracs[:, :, 1:]
)
__lowerCamelCase : int = directions / directions.norm(dim=-1 , keepdim=a )
__lowerCamelCase : Any = torch.stack(
[
torch.broadcast_to(self.origin.view(a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(a , *a , 2 , 3 )
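# Sketch of the ray construction above: pixel coordinates are mapped to fractions
# in [-1, 1], scaled by tan(fov / 2) to get image-plane offsets, and each direction
# is z + x * frac_x + y * frac_y, normalized to unit length; every ray then pairs
# the broadcast camera origin with that direction.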
def _snake_case ( self: int , a: int , a: int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=a , height=a , x_fov=self.x_fov , y_fov=self.y_fov , )
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Dict = []
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : str = []
__lowerCamelCase : Optional[int] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__lowerCamelCase : Tuple = np.array([np.sin(SCREAMING_SNAKE_CASE__ ), np.cos(SCREAMING_SNAKE_CASE__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__lowerCamelCase : Optional[Any] = -z * 4
__lowerCamelCase : Any = np.array([np.cos(SCREAMING_SNAKE_CASE__ ), -np.sin(SCREAMING_SNAKE_CASE__ ), 0.0] )
__lowerCamelCase : Optional[int] = np.cross(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
origins.append(SCREAMING_SNAKE_CASE__ )
xs.append(SCREAMING_SNAKE_CASE__ )
ys.append(SCREAMING_SNAKE_CASE__ )
zs.append(SCREAMING_SNAKE_CASE__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE__ , axis=0 ) ).float() , width=SCREAMING_SNAKE_CASE__ , height=SCREAMING_SNAKE_CASE__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(SCREAMING_SNAKE_CASE__ )) , )
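# Usage sketch (the function name is obfuscated above; behaviour read from the body):
# calling it with a pixel size builds 20 poses evenly spaced on a circle of radius 4,
# each oriented to look toward the world origin, with an x/y fov of 0.7.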
| 230
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 621
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def A ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A ( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'A painting of a squirrel eating a burger'
_SCREAMING_SNAKE_CASE : Optional[Any] = jax.device_count()
_SCREAMING_SNAKE_CASE : List[Any] = num_samples * [prompt]
_SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[Any] = replicate(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = shard(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Any = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(lowerCAmelCase_ , jax.device_count() )
_SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , num_inference_steps=2_5 , jit=lowerCAmelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
_SCREAMING_SNAKE_CASE : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_SCREAMING_SNAKE_CASE : Optional[Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_SCREAMING_SNAKE_CASE : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def A ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : str = 'stabilityai/stable-diffusion-2'
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase_ , subfolder='scheduler' )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase_ , scheduler=lowerCAmelCase_ , revision='bf16' , dtype=jnp.bfloataa , )
_SCREAMING_SNAKE_CASE : Any = scheduler_params
_SCREAMING_SNAKE_CASE : int = 'A painting of a squirrel eating a burger'
_SCREAMING_SNAKE_CASE : Tuple = jax.device_count()
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_samples * [prompt]
_SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Dict = replicate(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Any = shard(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Any = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(lowerCAmelCase_ , jax.device_count() )
_SCREAMING_SNAKE_CASE : int = sd_pipe(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , num_inference_steps=2_5 , jit=lowerCAmelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
_SCREAMING_SNAKE_CASE : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_SCREAMING_SNAKE_CASE : str = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_SCREAMING_SNAKE_CASE : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 621
| 1
|
'''simple docstring'''
import math
import sys
def __lowerCAmelCase ( a_ ) -> int:
'''simple docstring'''
if number != int(a_ ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * (number + 1)
SCREAMING_SNAKE_CASE : List[str] = 0
for i in range(1 , number + 1 ):
SCREAMING_SNAKE_CASE : List[Any] = sys.maxsize
SCREAMING_SNAKE_CASE : int = int(math.sqrt(a_ ) )
for j in range(1 , root + 1 ):
SCREAMING_SNAKE_CASE : List[str] = 1 + answers[i - (j**2)]
SCREAMING_SNAKE_CASE : List[str] = min(a_ , a_ )
SCREAMING_SNAKE_CASE : str = answer
return answers[number]
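# Worked example for the DP above, number = 12:
#   j = 3 gives 1 + answers[12 - 9] = 1 + answers[3] = 4
#   j = 2 gives 1 + answers[12 - 4] = 1 + answers[8] = 1 + 2 = 3
# so answers[12] = 3, matching 12 = 4 + 4 + 4.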
if __name__ == "__main__":
import doctest
doctest.testmod()
| 179
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCAmelCase :
'''simple docstring'''
snake_case__ : Dict = LEDConfig
snake_case__ : Dict = {}
snake_case__ : int = "gelu"
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : List[Any] = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Tuple = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = eos_token_id
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Tuple = bos_token_id
SCREAMING_SNAKE_CASE : Optional[int] = attention_window
# `ModelTesterMixin.test_attention_outputs` expects attention tensors of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attentions of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` tokens plus one before and one after
SCREAMING_SNAKE_CASE : List[str] = self.attention_window + 2
# Because of padding, `encoder_seq_length` differs from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests.
SCREAMING_SNAKE_CASE : Optional[int] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : str = tf.concat(
[tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE : Optional[int] = global_attention_mask
return config, inputs_dict
def _UpperCamelCase ( self , lowercase__ , lowercase__ ) -> Dict:
SCREAMING_SNAKE_CASE : List[Any] = TFLEDModel(config=lowercase__ ).get_decoder()
SCREAMING_SNAKE_CASE : int = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE : Tuple = input_ids[:1, :]
SCREAMING_SNAKE_CASE : Optional[int] = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE : Optional[Any] = 1
# first forward pass
SCREAMING_SNAKE_CASE : Any = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = outputs.to_tuple()
# create hypothetical next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE : int = model(lowercase__ , attention_mask=lowercase__ )[0]
SCREAMING_SNAKE_CASE : List[Any] = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : Tuple = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )
def __lowerCAmelCase ( a_ , a_ , a_ , a_=None , a_=None , a_=None , a_=None , ) -> Any:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE : int = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
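# Note on the defaults built above: `attention_mask` marks non-pad tokens, while
# `decoder_attention_mask` additionally forces the first decoder position to 1 so
# the decoder start token is always attended to.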
@require_tf
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
snake_case__ : List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
snake_case__ : Union[str, Any] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
snake_case__ : List[Any] = True
snake_case__ : Tuple = False
snake_case__ : Dict = False
snake_case__ : int = False
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Optional[int] = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowercase__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.zeros_like(inputs_dict['attention_mask'] )
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.seq_length
SCREAMING_SNAKE_CASE : str = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase__ ):
SCREAMING_SNAKE_CASE : List[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
SCREAMING_SNAKE_CASE : List[str] = len(lowercase__ )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_decoder_attentions_output(lowercase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Tuple = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
self.assertEqual(model.config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def _UpperCamelCase ( self ) -> Tuple:
pass
def _UpperCamelCase ( self ) -> Tuple:
# TODO: Head-masking not yet implemented
pass
def __lowerCAmelCase ( a_ ) -> Any:
'''simple docstring'''
return tf.constant(a_ , dtype=tf.intaa )
_lowerCAmelCase :Union[str, Any] = 1E-4
@slow
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Any = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
SCREAMING_SNAKE_CASE : Optional[Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE : List[str] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE : Any = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = model(**lowercase__ )[0]
SCREAMING_SNAKE_CASE : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : str = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
SCREAMING_SNAKE_CASE : List[Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : str = model(**lowercase__ )[0]
SCREAMING_SNAKE_CASE : Optional[int] = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
| 179
| 1
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def _A ( self : Tuple ):
return self.get_dummy_input()
@property
def _A ( self : Optional[int] ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def _A ( self : List[Any] , A : Optional[Any]=True , A : List[Any]=False , A : List[str]=False , A : Optional[Any]=False , ):
_UpperCAmelCase : Union[str, Any] = 4
_UpperCAmelCase : Dict = 32
_UpperCAmelCase : Tuple = (32, 32)
_UpperCAmelCase : Any = torch.manual_seed(0 )
_UpperCAmelCase : Any = torch.device(A )
_UpperCAmelCase : List[str] = (batch_size, num_channels) + sizes
_UpperCAmelCase : Tuple = randn_tensor(A , generator=A , device=A )
_UpperCAmelCase : Union[str, Any] = {"hidden_states": hidden_states}
if include_temb:
_UpperCAmelCase : Any = 128
_UpperCAmelCase : int = randn_tensor((batch_size, temb_channels) , generator=A , device=A )
if include_res_hidden_states_tuple:
_UpperCAmelCase : Optional[Any] = torch.manual_seed(1 )
_UpperCAmelCase : Dict = (randn_tensor(A , generator=A , device=A ),)
if include_encoder_hidden_states:
_UpperCAmelCase : Any = floats_tensor((batch_size, 32, 32) ).to(A )
if include_skip_sample:
_UpperCAmelCase : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=A , device=A )
return dummy_input
def _A ( self : Optional[int] ):
_UpperCAmelCase : str = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
_UpperCAmelCase : Dict = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
_UpperCAmelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def _A ( self : Union[str, Any] , A : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase : str = self.prepare_init_args_and_inputs_for_common()
_UpperCAmelCase : Dict = self.block_class(**A )
unet_block.to(A )
unet_block.eval()
with torch.no_grad():
_UpperCAmelCase : List[str] = unet_block(**A )
if isinstance(A , A ):
_UpperCAmelCase : Dict = output[0]
self.assertEqual(output.shape , self.output_shape )
_UpperCAmelCase : Union[str, Any] = output[0, -1, -3:, -3:]
_UpperCAmelCase : Union[str, Any] = torch.tensor(A ).to(A )
assert torch_all_close(output_slice.flatten() , A , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def _A ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
_UpperCAmelCase : str = self.block_class(**A )
model.to(A )
model.train()
_UpperCAmelCase : Union[str, Any] = model(**A )
if isinstance(A , A ):
_UpperCAmelCase : Union[str, Any] = output[0]
_UpperCAmelCase : List[str] = torch.device(A )
_UpperCAmelCase : List[str] = randn_tensor(output.shape , device=A )
_UpperCAmelCase : Any = torch.nn.functional.mse_loss(A , A )
loss.backward()
| 244
|
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__SCREAMING_SNAKE_CASE : Optional[int] = {"""UserAgent""": UserAgent().random}
def UpperCamelCase_ ( _UpperCAmelCase : Dict ) -> dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = script.contents[0]
_UpperCAmelCase : List[Any] = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
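# Illustrative shape of the JSON parsed above (keys taken from the access path):
#   {"entry_data": {"ProfilePage": [{"graphql": {"user": {...}}}]}}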
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[str] , A : List[Any] ):
_UpperCAmelCase : int = F"""https://www.instagram.com/{username}/"""
_UpperCAmelCase : Tuple = self.get_json()
def _A ( self : str ):
_UpperCAmelCase : Optional[Any] = requests.get(self.url , headers=A ).text
_UpperCAmelCase : Union[str, Any] = BeautifulSoup(A , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ):
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self : int ):
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def _A ( self : List[str] ):
return self.user_data["username"]
@property
def _A ( self : Dict ):
return self.user_data["full_name"]
@property
def _A ( self : Tuple ):
return self.user_data["biography"]
@property
def _A ( self : Tuple ):
return self.user_data["business_email"]
@property
def _A ( self : str ):
return self.user_data["external_url"]
@property
def _A ( self : Union[str, Any] ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A ( self : List[Any] ):
return self.user_data["edge_follow"]["count"]
@property
def _A ( self : int ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A ( self : Optional[Any] ):
return self.user_data["profile_pic_url_hd"]
@property
def _A ( self : str ):
return self.user_data["is_verified"]
@property
def _A ( self : Tuple ):
return self.user_data["is_private"]
def UpperCamelCase_ ( _UpperCAmelCase : str = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCAmelCase : List[Any] = InstagramUser(_UpperCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _UpperCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : Any = InstagramUser("""github""")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 244
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : int = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class snake_case ( UpperCAmelCase ):
__magic_name__ = '''codegen'''
__magic_name__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[Any] , A : List[str]=5_0_4_0_0 , A : Optional[int]=2_0_4_8 , A : int=2_0_4_8 , A : Dict=4_0_9_6 , A : Tuple=2_8 , A : Dict=1_6 , A : Tuple=6_4 , A : str=None , A : List[str]="gelu_new" , A : str=0.0 , A : Optional[int]=0.0 , A : str=0.0 , A : Dict=1E-5 , A : int=0.02 , A : Union[str, Any]=True , A : Union[str, Any]=5_0_2_5_6 , A : Optional[Any]=5_0_2_5_6 , A : List[Any]=False , **A : Optional[int] , ):
'''simple docstring'''
a : List[str] = vocab_size
a : Any = n_ctx
a : Tuple = n_positions
a : Union[str, Any] = n_embd
a : Optional[Any] = n_layer
a : Tuple = n_head
a : Union[str, Any] = n_inner
a : str = rotary_dim
a : Optional[int] = activation_function
a : Dict = resid_pdrop
a : Tuple = embd_pdrop
a : Any = attn_pdrop
a : Dict = layer_norm_epsilon
a : List[Any] = initializer_range
a : str = use_cache
a : Optional[Any] = bos_token_id
a : List[Any] = eos_token_id
super().__init__(
bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A )
class snake_case ( UpperCAmelCase ):
def __init__( self : int , A : PretrainedConfig , A : str = "default" , A : List[PatchingSpec] = None , A : bool = False , ):
'''simple docstring'''
super().__init__(A , task=A , patching_specs=A , use_past=A )
if not getattr(self._config , 'pad_token_id' , A ):
# TODO: how to do that better?
a : Union[str, Any] = 0
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : Optional[int] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(A , direction='inputs' )
a : str = {0: 'batch', 1: 'past_sequence + sequence'}
else:
a : Optional[Any] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self : Tuple , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
'''simple docstring'''
a : List[Any] = super(A , self ).generate_dummy_inputs(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
# We need to order the inputs in the way they appear in forward()
a : Tuple = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_key_values
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
a, a : List[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
a : str = seqlen + 2
a : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
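# i.e. one (batch, num_heads, past_seq_len, head_dim) tensor for the keys and one
# for the values of every layer, created as zero pairs just below.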
a : Optional[int] = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(self.num_layers )
]
a : Tuple = common_inputs['attention_mask']
if self.use_past:
a : Optional[Any] = ordered_inputs['attention_mask'].dtype
a : Optional[int] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(A , A , dtype=A )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
return 1_3
| 118
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case ( UpperCAmelCase , unittest.TestCase ):
__magic_name__ = RoCBertTokenizer
__magic_name__ = None
__magic_name__ = False
__magic_name__ = True
__magic_name__ = filter_non_english
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
a : str = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
a : List[Any] = {}
a : Any = {}
for i, value in enumerate(A ):
a : Union[str, Any] = i
a : str = i
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A , A , ensure_ascii=A )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A , A , ensure_ascii=A )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
a : Any = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A ) , [5, 6, 2, 5, 7, 8] )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
a : List[str] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : Any = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
a : int = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
a : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
a : Any = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : Dict = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : List[str] = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a : List[str] = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
a : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=A , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
a : str = {}
for i, token in enumerate(A ):
a : Union[str, Any] = i
a : Any = RoCBertWordpieceTokenizer(vocab=A , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : List[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
a : str = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a : List[str] = self.rust_tokenizer_class.from_pretrained(A , **A )
a : Any = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
a : str = tokenizer_r.encode_plus(
A , return_attention_mask=A , return_token_type_ids=A , return_offsets_mapping=A , add_special_tokens=A , )
a : int = tokenizer_r.do_lower_case if hasattr(A , 'do_lower_case' ) else False
a : Tuple = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : Union[str, Any] = ['的', '人', '有']
a : Tuple = ''.join(A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a : Optional[int] = True
a : Any = self.tokenizer_class.from_pretrained(A , **A )
a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
a : Optional[Any] = tokenizer_p.encode(A , add_special_tokens=A )
a : Dict = tokenizer_r.encode(A , add_special_tokens=A )
a : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(A )
a : Tuple = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A , A )
self.assertListEqual(A , A )
a : Dict = False
a : Any = self.rust_tokenizer_class.from_pretrained(A , **A )
a : List[Any] = self.tokenizer_class.from_pretrained(A , **A )
a : Optional[int] = tokenizer_r.encode(A , add_special_tokens=A )
a : List[str] = tokenizer_p.encode(A , add_special_tokens=A )
a : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(A )
a : Tuple = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that only the first Chinese character is not preceded by "##".
a : List[str] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(A )
]
self.assertListEqual(A , A )
self.assertListEqual(A , A )
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
a : List[str] = tokenizer.encode('你好' , add_special_tokens=A )
a : int = tokenizer.encode('你是谁' , add_special_tokens=A )
a : Any = tokenizer.build_inputs_with_special_tokens(A )
a : Tuple = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
a : Optional[int] = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a : Dict = '你好,你是谁'
a : List[str] = tokenizer.tokenize(A )
a : str = tokenizer.convert_tokens_to_ids(A )
a : str = tokenizer.convert_tokens_to_shape_ids(A )
a : Union[str, Any] = tokenizer.convert_tokens_to_pronunciation_ids(A )
a : Any = tokenizer.prepare_for_model(
A , A , A , add_special_tokens=A )
a : Union[str, Any] = tokenizer.encode_plus(A , add_special_tokens=A )
self.assertEqual(A , A )
| 118
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase__ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""DPTFeatureExtractor"""]
UpperCAmelCase__ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 277
|
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" ,[
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(lowercase ,i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 ,4 ), range(4 ,7 ), range(7 ,10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 ,1 ), range(1 ,2 ), range(2 ,3 )]),
] ,)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = _distribute_shards(**lowercase )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" ,[
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] ,)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = _split_gen_kwargs(lowercase ,lowercase )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" ,[
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] ,)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if expected is RuntimeError:
with pytest.raises(lowercase ):
_number_of_shards_in_gen_kwargs(lowercase )
else:
_UpperCAmelCase = _number_of_shards_in_gen_kwargs(lowercase )
assert out == expected
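# Worked example mirroring the parametrize cases above:
#   _distribute_shards(num_shards=10, max_num_jobs=3) -> [range(0, 4), range(4, 7), range(7, 10)]
# The one shard left over after the even 3-3-3 split goes to the earliest job.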
| 277
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase : List[str] ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description )
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f'accelerate configuration saved at {config_file}' )
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
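# --- Usage note (added for illustration; not part of the original module) ---
# This module backs the `accelerate config` subcommand, so a typical shell
# invocation looks like:
#   accelerate config --config_file ./my_config.yaml
# Running the module directly calls main() and walks through the same prompts.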
| 661
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_json_dataset(dataset, expected_features )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir ).read()
    _check_json_dataset(dataset, expected_features )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir ).read()
    assert isinstance(dataset, Dataset )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features_order(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    cache_dir = tmp_path / 'cache'
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir ).read()
    assert isinstance(dataset, Dataset )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split ).read()
    _check_json_dataset(dataset, expected_features )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str ):
        path = jsonl_path
    elif issubclass(path_type, list ):
        path = [jsonl_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir ).read()
    _check_json_dataset(dataset, expected_features )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'train': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_json_datasetdict(dataset, expected_features )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader({'train': jsonl_path}, features=features, cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset, expected_features )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer )
def load_json_lines(buffer):
    return [json.loads(line ) for line in buffer]
class TestJsonDatasetWriter:
    '''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content, list )
        assert isinstance(exported_content[0], dict )
        assert len(exported_content ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content, container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys' ) and not hasattr(exported_content[0], 'keys' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content, list )
        assert isinstance(exported_content[0], dict )
        assert len(exported_content ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content, container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys' ) and not hasattr(exported_content[0], 'keys' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
    def test_dataset_to_json_with_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0 )
    @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp('data' ) / f'test.json.{extension}' )
        original_path = str(shared_datadir / f'test_file.json.{extension}' )
        JsonDatasetWriter(dataset, path, compression=compression ).write()
        with fsspec.open(path, 'rb', compression='infer' ) as f:
            exported_content = f.read()
        with fsspec.open(original_path, 'rb', compression='infer' ) as f:
            original_content = f.read()
        assert exported_content == original_content
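# --- Usage sketch (added for illustration; mirrors what the tests above exercise) ---
# A minimal reader/writer round trip, assuming a local `data.jsonl` file with
# one JSON object per line:
#
#   ds = JsonDatasetReader("data.jsonl", cache_dir="cache").read()
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()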
| 661
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer):
        """simple docstring"""
        super().__init__(image_processor, tokenizer )
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs )
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ), tensor_type=return_tensors )
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs )
    @property
    def model_input_names(self):
        """simple docstring"""
        return ["input_ids", "attention_mask", "pixel_values"]
| 589
|
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    def __init__(self, order):
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs, b_coeffs):
        """simple docstring"""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs )}'''
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs )}'''
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample):
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
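if __name__ == "__main__":
    # --- Usage sketch (added for illustration; the coefficients below are
    # arbitrary placeholders, not a designed filter) ---
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.9, 0.9], [0.005, 0.01, 0.005])
    print([round(filt.process(x), 6) for x in (0.0, 1.0, 1.0, 1.0)])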
| 589
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(__snake_case ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """simple docstring"""
    if not isinstance(n, int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    """simple docstring"""
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
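# Note (added for illustration): compute_nums(1)[0] searches the odd composites
# for the first one that cannot be written as prime + 2 * i**2; the well-known
# answer to this problem (Project Euler 46) is 5777.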
| 687
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig ):
    '''simple docstring'''
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=32 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=32 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
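# --- Usage sketch (added for illustration) ---
# config = BitConfig(layer_type="bottleneck", global_padding="same",
#                    out_features=["stage2", "stage4"])
# `global_padding` is upper-cased and validated against `supported_padding`,
# and `out_features` is aligned against the generated stage names.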
| 687
| 1
|
"""simple docstring"""
import os
SYMBOLS = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
def parse_roman_numerals(numerals):
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num):
    numerals = ""
    m_count = num // 10_00
    numerals += m_count * "M"
    num %= 10_00
    c_count = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __UpperCAmelCase ( __UpperCamelCase = "/p089_roman.txt" ):
__lowercase : str = 0
with open(os.path.dirname(__UpperCamelCase ) + roman_numerals_filename ) as filea:
__lowercase : Tuple = filea.readlines()
for line in lines:
__lowercase : List[str] = line.strip()
__lowercase : Union[str, Any] = parse_roman_numerals(__UpperCamelCase )
__lowercase : List[Any] = generate_roman_numerals(__UpperCamelCase )
savings += len(__UpperCamelCase ) - len(__UpperCamelCase )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
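# Note (added for illustration): the savings come from re-encoding each line in
# minimal form, e.g. parse_roman_numerals("XXXXVIIII") == 49 and
# generate_roman_numerals(49) == "XLIX", saving 5 characters for that line.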
| 76
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    """simple docstring"""
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f"Task {task} not supported." )
    print(f"Building PyTorch model from configuration: {config}" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
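# --- Usage sketch (added for illustration; paths are placeholders) ---
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#   --task WTQ \
#   --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#   --tapas_config_file ./tapas_wtq/config.json \
#   --pytorch_dump_path ./tapas_wtq_pytorch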
| 77
| 0
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
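# --- Usage sketch (added for illustration; `MyModel`/`encode` are hypothetical) ---
# The decorator is meant for methods of modules that may carry an accelerate
# `_hf_hook`, so the hook's pre_forward runs before the wrapped call:
#
#   class MyModel(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...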
| 89
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep )[-1]
    return re.search(R'''^(.*)_\d+\.jpg$''' , stem ).groups()[0]
class PetsDataset(Dataset):
    '''simple docstring'''
    def __init__(self, file_names, image_transform=None, label_to_id=None ):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self ):
        return len(self.file_names )
    def __getitem__(self, idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('''RGB''' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    image_size = config['''image_size''']
    if not isinstance(image_size , (list, tuple) ):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps , '''isdigit''' ):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps )
        else:
            raise ValueError(
                F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
    else:
        checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
SCREAMING_SNAKE_CASE_ = os.path.split(_lowerCAmelCase )[-1].split('''.''' )[0]
accelerator.init_trackers(_lowerCAmelCase , _lowerCAmelCase )
# Grab all the image filenames
SCREAMING_SNAKE_CASE_ = [os.path.join(args.data_dir , _lowerCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
SCREAMING_SNAKE_CASE_ = [extract_label(_lowerCAmelCase ) for fname in file_names]
SCREAMING_SNAKE_CASE_ = list(set(_lowerCAmelCase ) )
id_to_label.sort()
SCREAMING_SNAKE_CASE_ = {lbl: i for i, lbl in enumerate(_lowerCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# Split our filenames between train and validation
SCREAMING_SNAKE_CASE_ = np.random.permutation(len(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = int(0.8 * len(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = random_perm[:cut]
SCREAMING_SNAKE_CASE_ = random_perm[cut:]
# For training we use a simple RandomResizedCrop
SCREAMING_SNAKE_CASE_ = Compose([RandomResizedCrop(_lowerCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
SCREAMING_SNAKE_CASE_ = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_lowerCAmelCase , label_to_id=_lowerCAmelCase )
# For evaluation, we use a deterministic Resize
SCREAMING_SNAKE_CASE_ = Compose([Resize(_lowerCAmelCase ), ToTensor()] )
SCREAMING_SNAKE_CASE_ = PetsDataset([file_names[i] for i in eval_split] , image_transform=_lowerCAmelCase , label_to_id=_lowerCAmelCase )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , batch_size=_lowerCAmelCase , num_workers=4 )
SCREAMING_SNAKE_CASE_ = DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , batch_size=_lowerCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ = create_model('''resnet50d''' , pretrained=_lowerCAmelCase , num_classes=len(_lowerCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_ = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
SCREAMING_SNAKE_CASE_ = False
for param in model.get_classifier().parameters():
SCREAMING_SNAKE_CASE_ = True
# We normalize the batches of images to be a bit faster.
SCREAMING_SNAKE_CASE_ = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
SCREAMING_SNAKE_CASE_ = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=lr / 2_5 )
# Instantiate learning rate scheduler
SCREAMING_SNAKE_CASE_ = OneCycleLR(optimizer=_lowerCAmelCase , max_lr=_lowerCAmelCase , epochs=_lowerCAmelCase , steps_per_epoch=len(_lowerCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE_ = 0
# We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE_ = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
SCREAMING_SNAKE_CASE_ = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
SCREAMING_SNAKE_CASE_ = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
SCREAMING_SNAKE_CASE_ = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
SCREAMING_SNAKE_CASE_ = os.path.splitext(_lowerCAmelCase )[0]
if "epoch" in training_difference:
SCREAMING_SNAKE_CASE_ = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
SCREAMING_SNAKE_CASE_ = None
else:
SCREAMING_SNAKE_CASE_ = int(training_difference.replace('''step_''' , '''''' ) )
SCREAMING_SNAKE_CASE_ = resume_step // len(_lowerCAmelCase )
resume_step -= starting_epoch * len(_lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
model.train()
if args.with_tracking:
SCREAMING_SNAKE_CASE_ = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
SCREAMING_SNAKE_CASE_ = accelerator.skip_first_batches(_lowerCAmelCase , _lowerCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
SCREAMING_SNAKE_CASE_ = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
SCREAMING_SNAKE_CASE_ = {k: v.to(accelerator.device ) for k, v in batch.items()}
SCREAMING_SNAKE_CASE_ = (batch['''image'''] - mean) / std
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.cross_entropy(_lowerCAmelCase , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
SCREAMING_SNAKE_CASE_ = os.path.join(args.output_dir , _lowerCAmelCase )
accelerator.save_state(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
SCREAMING_SNAKE_CASE_ = {k: v.to(accelerator.device ) for k, v in batch.items()}
SCREAMING_SNAKE_CASE_ = (batch['''image'''] - mean) / std
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.gather_for_metrics((predictions, batch['''label''']) )
SCREAMING_SNAKE_CASE_ = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
SCREAMING_SNAKE_CASE_ = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {1_0_0 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 1_0_0 * eval_metric,
'''train_loss''': total_loss.item() / len(_lowerCAmelCase ),
'''epoch''': epoch,
} , step=_lowerCAmelCase , )
if checkpointing_steps == "epoch":
SCREAMING_SNAKE_CASE_ = F"epoch_{epoch}"
if args.output_dir is not None:
SCREAMING_SNAKE_CASE_ = os.path.join(args.output_dir , _lowerCAmelCase )
accelerator.save_state(_lowerCAmelCase )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument('''--data_dir''' , required=True , help='''The data folder on disk.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    parser.add_argument(
        '''--checkpointing_steps''' , type=str , default=None , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--resume_from_checkpoint''' , type=str , default=None , help='''If the training should continue from a checkpoint folder.''' , )
    parser.add_argument(
        '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
    parser.add_argument(
        '''--project_dir''' , type=str , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
    args = parser.parse_args()
    config = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 6_4, '''image_size''': 2_2_4}
    training_function(config , args )
if __name__ == "__main__":
main()
| 89
| 1
|
'''simple docstring'''
from __future__ import annotations
__author__ = '''Muhammad Umer Farooq'''
__license__ = '''MIT'''
__version__ = '''1.0.0'''
__maintainer__ = '''Muhammad Umer Farooq'''
__email__ = '''contact@muhammadumerfarooq.me'''
__status__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser ):
    def __init__( self , domain: str ):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag( self , tag: str , attrs: list[tuple[str, str | None]] ) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url: str ) -> str:
    '''simple docstring'''
    return ".".join(get_sub_domain_name(url ).split('''.''' )[-2:] )
def get_sub_domain_name(url: str ) -> str:
    '''simple docstring'''
    return parse.urlparse(url ).netloc
def UpperCamelCase ( lowercase_ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
lowercase =get_domain_name(lowercase_ )
# Initialize the parser
lowercase =Parser(lowercase_ )
try:
# Open URL
lowercase =requests.get(lowercase_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
lowercase =set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
lowercase =requests.get(lowercase_ )
# Get the valid email.
lowercase =re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(lowercase_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(lowercase_ )
if __name__ == "__main__":
    emails = emails_from_url('''https://github.com''')
print(F"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
| 72
|
from __future__ import annotations
from typing import Any
class Graph:
    '''simple docstring'''
    def __init__( self , num_of_nodes ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge( self , u_node , v_node , weight ):
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node ):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size , u_node , v_node ):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"The total weight of the minimal spanning tree is: {mst_weight}" )
def _A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
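if __name__ == "__main__":
    # --- Usage sketch (added for illustration) ---
    # A path graph 0-1-2-3 needs all three edges, so the MST weight is 1+2+3 = 6.
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(2, 3, 3)
    g.boruvka()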
| 113
| 0
|
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple ) -> bool:
    """simple docstring"""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 1_0 ) -> int:
    """simple docstring"""
    return sum(
        int(''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
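# Note (added for illustration): solution() sums every 0-9 pandigital number
# whose 3-digit substrings satisfy the divisibility chain; the known result
# for Project Euler 43 is 16695334890.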
| 287
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
MAX_MODEL_INPUT_SIZES = {
'''facebook/s2t-small-librispeech-asr''': 1_0_2_4,
}
MUSTC_LANGS = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
LANGUAGES = {'''mustc''': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<unk>" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = None , **_lowerCamelCase , ):
__UpperCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
__UpperCamelCase : Union[str, Any] = do_upper_case
__UpperCamelCase : Dict = do_lower_case
__UpperCamelCase : List[str] = load_json(_lowerCamelCase )
__UpperCamelCase : List[Any] = {v: k for k, v in self.encoder.items()}
__UpperCamelCase : int = spm_file
__UpperCamelCase : List[Any] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
__UpperCamelCase : Any = lang_codes
__UpperCamelCase : Any = LANGUAGES[lang_codes]
__UpperCamelCase : str = [f"""<lang:{lang}>""" for lang in self.langs]
__UpperCamelCase : List[str] = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
__UpperCamelCase : str = self.lang_tokens
__UpperCamelCase : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__UpperCamelCase : Dict = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )
    @property
    def tgt_lang( self ) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang: str ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index: int ) -> str:
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm(path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path: str ) -> Union[Dict, List]:
    """simple docstring"""
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json(data , path: str ) -> None:
    """simple docstring"""
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
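# --- Usage sketch (added for illustration; file paths are placeholders) ---
# tokenizer = Speech2TextTokenizer(vocab_file="vocab.json",
#                                  spm_file="sentencepiece.bpe.model")
# ids = tokenizer("hello world").input_ids  # prefix_tokens + tokens + [eos_token_id]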
| 287
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=description )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f"""accelerate configuration saved at {config_file}""" )
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 66
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1_024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1_536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1_024, 2_048]
        auxiliary_in_channels = 1_024
    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
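# Example (illustrative): for stage 0, layer 0 the list above maps the
# mmsegmentation key "backbone.stages.0.0.gamma" to the Transformers key
# "backbone.encoder.stages.0.layers.0.layer_scale_parameter".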
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
# verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
_lowercase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowercase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowercase : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowercase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase_ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
| 215
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """simple docstring"""
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    """simple docstring"""
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """simple docstring"""
    token = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def final():
    """simple docstring"""
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """simple docstring"""
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1_0_0_0
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='''dataset''')), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2+2+20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('''cpu'''))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
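# Example invocation (illustrative; the script filename is an assumption):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py --cvt_model cvt-w24 \
#       --image_size 384 --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24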
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 215
| 1
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class UpperCAmelCase ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MarianTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab'])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['source_spm'])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['target_spm'])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        '''simple docstring'''
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = """</s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<pad>')
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        '''simple docstring'''
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'''{ORG_NAME}opus-mt-en-de''')
        batch = en_de_tokenizer(['I am a small frog'], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob('*')]
        self.assertIn('source.spm', contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        '''simple docstring'''
        tok = self.get_tokenizer()
        batch = tok(
            ['I am a small frog' * 1000, 'I am a small frog'], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        '''simple docstring'''
        tok = self.get_tokenizer()
        batch_smaller = tok(['I am a tiny frog', 'I am a small frog'], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
lowerCamelCase_ = {"""input_ids""": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
    def test_tokenizer_integration_seperate_vocabs(self):
        '''simple docstring'''
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')
        source_text = """Tämä on testi"""
        target_text = """This is a test"""
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 42
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    batch_params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim(self):
        '''simple docstring'''
        return 32

    @property
    def block_out_channels_a(self):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
            """in_channels""": 8,
            # Out channels is double the in channels because the model predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image_hint""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        '''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_schedule="""linear""", beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="""epsilon""", thresholding=False, )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        '''simple docstring'''
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
        hint = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = """A robot, 4k photo"""
        generator = torch.Generator(device="""cuda""").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
        generator = torch.Generator(device="""cuda""").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 65
| 0
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
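# Quick sanity check of the 6k +/- 1 trial division above (illustrative, not
# part of the original script): every prime > 3 sits next to a multiple of 6,
# e.g. 29 = 6*5 - 1 and 31 = 6*5 + 1, while 35 = 5 * 7 is rejected at i = 5.
assert is_prime(29) and is_prime(31) and not is_prime(35)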
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
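# For the default ratio of 0.1 this returns 26241: the spiral side length at
# which primes first drop below 10% of the diagonal values (the published
# Project Euler 58 result).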
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 208
| 0
|
__a: str = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__a: int = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__a: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 108
|
import functools
def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """simple docstring"""
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index overflows - delete all remaining characters of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index overflows - delete all remaining characters of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb), 1 + min_distance(indexa, indexb + 1), diff + min_distance(indexa + 1, indexb + 1), )

    return min_distance(0, 0)
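# Example usage (illustrative): turning "kitten" into "sitting" takes 3 edits
# (substitute k->s, substitute e->i, append g), so
# min_distance_up_bottom("kitten", "sitting") == 3.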
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225
| 0
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    '''simple docstring'''
    assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    '''simple docstring'''
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    '''simple docstring'''
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    '''simple docstring'''
    # layernorm 1
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0])
    layer_norm_a_bias = np.asarray(layer_norm_a[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_a_weight), torch.tensor(layer_norm_a_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_a_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_a_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_a_weight), torch.tensor(layer_norm_a_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    '''simple docstring'''
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    '''simple docstring'''
    config = ReformerConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
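# Example invocation (illustrative; the script filename is an assumption):
#   python convert_reformer_trax_checkpoint_to_pytorch.py --trax_model_pkl_path model.pkl \
#       --config_file config.json --pytorch_dump_path pytorch_model.bin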
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A : Dict = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 710
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        '''simple docstring'''
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        '''simple docstring'''
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ] )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
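    # Example (illustrative): a config such as
    # {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
    # yields _stage == 3 and _offload == True after this method runs.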
    def find_config_node(self, ds_key_long):
        '''simple docstring'''
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value(self, ds_key_long, default=None):
        '''simple docstring'''
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
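    # Example (illustrative): with config {"zero_optimization": {"stage": 3}},
    # get_value("zero_optimization.stage") walks the nested dicts and returns 3,
    # while get_value("zero_optimization.missing", default=-1) returns -1.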
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        '''simple docstring'''
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''')
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)
    def is_true(self, ds_key_long):
        '''simple docstring'''
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        '''simple docstring'''
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        '''simple docstring'''
        return self._stage == 2

    def is_zero3(self):
        '''simple docstring'''
        return self._stage == 3

    def is_offload(self):
        '''simple docstring'''
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        '''simple docstring'''
        self.engine = engine

    def backward(self, loss, **kwargs):
        '''simple docstring'''
        self.engine.backward(loss, **kwargs)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        '''simple docstring'''
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        '''simple docstring'''
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        '''simple docstring'''
        super().__init__(scheduler, optimizers)

    def step(self):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        '''simple docstring'''
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        '''simple docstring'''
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 473
| 0
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """simple docstring"""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
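# For the default n = 1000 expansions of the continued fraction for sqrt(2),
# 153 fractions have a numerator with more digits than the denominator
# (the published Project Euler 57 result).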
if __name__ == "__main__":
print(F"""{solution() = }""")
| 186
|
from scipy.stats import spearmanr
import datasets
a__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
a__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
    {'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
    >>> print(results['spearmanr'])
    -0.7
    >>> print(round(results['spearmanr_pvalue'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 14
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
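# Worked example: the default `attention_types=[[["global", "local"], 12]]`
# expands to a 24-entry list of alternating global/local attention, one entry
# per layer:
#
#   >>> GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])[:4]
#   ['global', 'local', 'global', 'local']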
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to enable the export to ONNX: returns the largest divisor
    of ``seq_length`` below ``window_size`` and the resulting number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
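# Worked example: for seq_length=8 and window_size=4 the divisor candidates are
# [1, 2, 3]; the largest divisor of 8 among them is 2, so the sequence splits
# into 4 blocks of length 2:
#
#   >>> custom_get_block_length_and_num_blocks(8, 4)
#   (tensor(2), tensor(4))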
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 425
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Any) -> List[Tuple[int, ...]]:
    """Collect the shapes of every tensor leaf in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for the given dims."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
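# Worked example: with dims=(2, 4), i.e. 8 flat positions, flat index 7 maps to
# the multi-index (1, 3), since 7 = 1 * 4 + 3:
#
#   >>> _flat_idx_to_idx(7, (2, 4))
#   (1, 3)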
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice the flattened batch dimensions of ``t`` between ``flat_start`` and ``flat_end``."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run ``layer`` over ``inputs`` in chunks of ``chunk_size`` along the flattened batch dimensions."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
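# Usage sketch (illustrative): chunk a per-row computation over the batch
# dimension so that only `chunk_size` rows are materialized at a time. The toy
# "layer" below is a placeholder, not part of the original module.
#
#   layer = lambda x: {"doubled": x * 2}      # toy layer returning a dict
#   x = torch.randn(12, 5)                    # one batch dim of size 12
#   out = chunk_layer(layer, {"x": x}, chunk_size=4, no_batch_dims=1)
#   assert torch.allclose(out["doubled"], x * 2)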
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
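# Usage sketch (illustrative; `my_layer` and `x` are placeholders): the tuner
# probes power-of-two chunk sizes with a representative call and caches the
# largest one that does not raise a RuntimeError (e.g. CUDA OOM). It only
# re-tunes when the argument shapes change.
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(
#       representative_fn=lambda t, chunk_size: chunk_layer(
#           my_layer, {"x": t}, chunk_size=chunk_size, no_batch_dims=1
#       ),
#       args=(x,),
#       min_chunk_size=16,
#   )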
| 425
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 85
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
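# Behavior sketch: `DummyObject` makes the placeholder fail at *use* time rather
# than at import time, so importing the class is always safe, while using it
# without `torch` and `torchsde` installed raises an error from
# `requires_backends`:
#
#   scheduler = DPMSolverSDEScheduler()  # raises unless torch + torchsde are installed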
| 601
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
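# Worked example: with the default conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
# the product is 5 * 2**6 = 320, i.e. the model emits one logit frame per 320
# input samples:
#
#   >>> SEWDConfig().inputs_to_logits_ratio
#   320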
| 703
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 197
| 0
|
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element (1-indexed) of ``lst`` using quickselect.

    Elements are assumed to be distinct, since the strict comparisons below
    drop any duplicates of the pivot.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
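# Usage example (quickselect is 1-indexed here, and runs in linear time on
# average thanks to the random pivot):
#
#   >>> kth_number([3, 1, 2], 2)
#   2
#   >>> kth_number([5, 4, 3, 2, 1], 5)
#   5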
| 109
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 319
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 713
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': f1})
        logger.info(
            name.ljust(10), F'Recall: {recall * 100:.2f}', F' Precision: {precision * 100:.2f}', F' F1: {f1 * 100:.2f}', )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F'CoNLL score: {conll:.2f}')
        output_scores.update({"conll_score": conll})
    return output_scores
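# Illustrative sketch (not part of the original metric file): the CoNLL score
# computed above is simply the arithmetic mean of the MUC, B-cubed and CEAFe
# F1 values, rescaled to 0-100. With hypothetical F1 values:
#   muc_f1, bcub_f1, ceafe_f1 = 0.80, 0.70, 0.75
#   conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100  # -> 75.0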
def check_gold_parse_annotation(key_lines) -> bool:
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
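# Illustrative note (an assumption about the CoNLL-2012 layout, consistent
# with the docstring above): column index 5 of a non-comment line holds the
# parse bit, so a line such as
#   "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* ..."
# has parse_col == "(TOP(S(VP*" != "-" and counts as gold-parse annotated.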
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """simple docstring"""
        coval_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=coval_metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 247
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 117
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """simple docstring"""
        # Always use a fixed sequence length so the per-example candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
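    # Illustrative shape note (an assumption, not from the original docstring):
    # for `text` holding B examples of C candidate strings each, the method
    # above stacks "input_ids" to shape (B, C, max_length) once converted with
    # tensor_type="pt" or "np", provided a max_length kwarg is supplied.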
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
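    # Worked example (assumption): for sequences A and B, the method above
    # returns len(A) + 2 zeros for "[CLS] A [SEP]" followed by len(B) + 1 ones
    # for "B [SEP]".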
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 117
| 1
|
def topological_sort(graph: dict) -> None:
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
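# Worked example (derived from the graph above): the run prints
# [0, 1, 2, 3, 4, 5]. A cyclic input never empties its in-degree queue, so
# cnt != len(graph) and the cycle branch fires; hypothetical input:
#   cyclic_graph = {0: [1], 1: [2], 2: [0]}
#   topological_sort(cyclic_graph)  # prints "Cycle exists"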
| 706
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
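# Hypothetical CLI invocation (file names are placeholders, not from the
# original repository); fire maps positional and --flag arguments onto the
# function's parameters:
#   python rouge_cli.py predictions.txt targets.txt --save_path=rouge.json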
| 171
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        '''simple docstring'''
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 2e-3
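# Illustrative note (assumption): atol_for_validation above is the absolute
# tolerance applied when the exported ONNX graph's outputs are numerically
# compared against the original PyTorch model during export validation.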
| 677
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
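# Illustrative note (assumption): pytest picks these hooks up automatically
# because this file is a conftest.py; passing `--make-reports=<id>` on the
# pytest command line routes the terminal summary through
# pytest_terminal_summary_main with that report id.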
| 367
| 0
|
import unittest
from knapsack import greedy_knapsack as kp
class __lowerCAmelCase ( unittest.TestCase ):
    def test_sorted(self) -> None:
        """simple docstring"""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self) -> None:
        """simple docstring"""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self) -> None:
        """simple docstring"""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self) -> None:
        """simple docstring"""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self) -> None:
        """simple docstring"""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self) -> None:
        """simple docstring"""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 720
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
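# Worked example (assumption, not in the original module): for a nested input
#   {"a": torch.zeros(2, 3), "b": [torch.zeros(4)]}
# _fetch_dims returns [torch.Size([2, 3]), torch.Size([4])].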
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
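# Worked example (assumption): _flat_idx_to_idx(5, (2, 3)) == (1, 2), i.e. the
# row-major (C-order) unflattening of flat index 5 within a 2x3 shape.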
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None):
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
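# Equivalence sketch (assumption): for t of shape (2, 3, 5) and
# no_batch_dims=2, _chunk_slice(t, 1, 4, 2) returns the same rows as
# t.reshape(-1, 5)[1:4], but without materializing the flattened tensor.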
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
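# Hypothetical usage sketch (names are placeholders, not from the original
# module): flatten two leading batch dims and run `layer` chunk_size rows at
# a time; the result matches layer(x=x) up to chunking.
#   out = chunk_layer(layer, {"x": x}, chunk_size=4, no_batch_dims=2)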
class ChunkSizeTuner:
    def __init__(self, max_chunk_size=512, ) -> None:
        """simple docstring"""
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size) -> int:
        """simple docstring"""
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]
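    # Worked example (assumption): with max_chunk_size=512 and min_chunk_size=16
    # the candidate list above is [16, 32, 64, 128, 256, 516]; the loop then
    # binary-searches for the largest candidate whose trial forward pass does
    # not raise a RuntimeError (typically an out-of-memory error).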
    def _compare_arg_caches(self, ac, cached_ac) -> bool:
        """simple docstring"""
        consistent = True
        for a1, a2 in zip(ac, cached_ac):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn, args, min_chunk_size, ) -> int:
        """simple docstring"""
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
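# Hypothetical usage sketch (variable names are placeholders): reuse a single
# tuner across forward passes; re-tuning only happens when the cached argument
# shapes change.
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(layer_fn, (activations,), min_chunk_size=16)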
| 629
| 0
|
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
"""simple docstring"""
def __init__( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any]=13 , lowerCAmelCase__ :Optional[int]=7 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :str=99 , lowerCAmelCase__ :List[str]=64 , lowerCAmelCase__ :List[Any]=32 , lowerCAmelCase__ :List[str]=5 , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :Optional[Any]=37 , lowerCAmelCase__ :Tuple="gelu" , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Tuple=512 , lowerCAmelCase__ :Optional[int]=16 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :Dict=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : str = is_training
snake_case_ : Dict = use_input_mask
snake_case_ : str = use_token_type_ids
snake_case_ : int = use_labels
snake_case_ : Tuple = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = embedding_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Optional[int] = type_vocab_size
snake_case_ : Optional[Any] = type_sequence_label_size
snake_case_ : int = initializer_range
snake_case_ : Optional[int] = num_labels
snake_case_ : Any = num_choices
snake_case_ : List[str] = scope
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = None
if self.use_input_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Dict = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : int = None
snake_case_ : Any = None
snake_case_ : List[Any] = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _A ( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = MegatronBertModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : List[str] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _A ( self :Any , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = MegatronBertForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = MegatronBertForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] ) -> Dict:
'''simple docstring'''
snake_case_ : str = MegatronBertForNextSentencePrediction(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[str] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _A ( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = MegatronBertForPreTraining(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , next_sentence_label=lowerCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _A ( self :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = MegatronBertForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.num_labels
snake_case_ : Dict = MegatronBertForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self :Any , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.num_labels
snake_case_ : Union[str, Any] = MegatronBertForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A ( self :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.num_choices
snake_case_ : List[Any] = MegatronBertForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Any = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) : Any = config_and_inputs
snake_case_ : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict=False ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ )
snake_case_ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def _A ( self :int ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = MegatronBertModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase__ )
def _A ( self :str ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase__ )
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase__ )
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@slow
@unittest.skip("Model is not available." )
    def test_inference_no_head(self):
        '''simple docstring'''
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 653
|
"""simple docstring"""
from manim import *
class lowerCAmelCase ( Scene ):  # base class restored to manim's Scene; the original class name was mangled
    '''simple docstring'''
    def construct( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('CPU' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('GPU' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('Model' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 )
self.add(lowerCAmelCase__ )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('Loaded Checkpoint' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 )
target.move_to(lowerCAmelCase__ )
ckpt_arr.append(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('Disk' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) )
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(FadeOut(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) )
self.play(
FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , )
self.wait()
| 247
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
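# Hypothetical usage sketch (not part of the original file): the defaults
# above reproduce the RoBERTa-base geometry.
#   config = RobertaPreLayerNormConfig()
#   assert config.hidden_size == 768 and config.num_hidden_layers == 12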
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
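# Illustrative note (assumption): marking axes as "batch"/"sequence" dynamic
# lets the exported ONNX graph accept variable batch sizes and sequence
# lengths, e.g. input_ids of shape (1, 128) or (8, 512) at inference time.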
| 514
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 514
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
UpperCAmelCase = dict(zip(a__ , range(len(a__ ) ) ) )
UpperCAmelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
UpperCAmelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , a__ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
# load decoder from hub
UpperCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def __snake_case ( self : int , **a__ : Optional[int] ):
UpperCAmelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(a__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __snake_case ( self : int , **a__ : str ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **a__ )
def __snake_case ( self : List[str] , **a__ : Optional[Any] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **a__ )
def __snake_case ( self : int ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = self.get_decoder()
UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , a__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , a__ )
def __snake_case ( self : Any ):
UpperCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(a__ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=a__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def test_feature_extractor(self):
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()

    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

    raw_speech = floats_list((3, 1000))

    input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
    input_processor = processor(raw_speech, return_tensors="np")

    for key in input_feat_extract.keys():
        self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

def test_tokenizer(self):
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()

    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

    input_str = "This is a test string"

    encoded_processor = processor(text=input_str)
    encoded_tok = tokenizer(input_str)

    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key])

def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
    np.random.seed(seed)
    return np.random.rand(*shape)
def test_decoder(self):
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()

    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

    logits = self._get_dummy_logits(shape=(10, 16), seed=13)

    decoded_processor = processor.decode(logits)

    decoded_decoder = decoder.decode_beams(logits)[0]

    self.assertEqual(decoded_decoder[0], decoded_processor.text)
    self.assertEqual("</s> <s> </s>", decoded_processor.text)
    self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
    self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

@parameterized.expand([[None], ["fork"], ["spawn"]])
def test_decoder_batch(self, pool_context):
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()

    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

    logits = self._get_dummy_logits()

    # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
    # otherwise, the LM won't be available to the pool's sub-processes.
    # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
    if pool_context is None:
        decoded_processor = processor.batch_decode(logits)
    else:
        with get_context(pool_context).Pool() as pool:
            decoded_processor = processor.batch_decode(logits, pool)

    logits_list = list(logits)

    with get_context("fork").Pool() as p:
        decoded_beams = decoder.decode_beams_batch(p, logits_list)

    texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
    for beams in decoded_beams:
        texts_decoder.append(beams[0][0])
        logit_scores_decoder.append(beams[0][-2])
        lm_scores_decoder.append(beams[0][-1])

    self.assertListEqual(texts_decoder, decoded_processor.text)
    self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
    self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
    self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def test_decoder_with_params(self):
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()

    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

    logits = self._get_dummy_logits()

    beam_width = 15
    beam_prune_logp = -20.0
    token_min_logp = -4.0

    decoded_processor_out = processor.batch_decode(
        logits,
        beam_width=beam_width,
        beam_prune_logp=beam_prune_logp,
        token_min_logp=token_min_logp,
    )
    decoded_processor = decoded_processor_out.text

    logits_list = list(logits)

    with get_context("fork").Pool() as pool:
        decoded_decoder_out = decoder.decode_beams_batch(
            pool,
            logits_list,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )

    decoded_decoder = [d[0][0] for d in decoded_decoder_out]
    logit_scores = [d[0][2] for d in decoded_decoder_out]
    lm_scores = [d[0][3] for d in decoded_decoder_out]

    self.assertListEqual(decoded_decoder, decoded_processor)
    self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_decoder)

    self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
    self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

    self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
    self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

def test_decoder_with_params_of_lm(self):
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()

    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

    logits = self._get_dummy_logits()

    alpha = 2.0
    beta = 5.0
    unk_score_offset = -20.0
    lm_score_boundary = True

    decoded_processor_out = processor.batch_decode(
        logits,
        alpha=alpha,
        beta=beta,
        unk_score_offset=unk_score_offset,
        lm_score_boundary=lm_score_boundary,
    )
    decoded_processor = decoded_processor_out.text

    logits_list = list(logits)
    decoder.reset_params(
        alpha=alpha,
        beta=beta,
        unk_score_offset=unk_score_offset,
        lm_score_boundary=lm_score_boundary,
    )

    with get_context("fork").Pool() as pool:
        decoded_decoder_out = decoder.decode_beams_batch(
            pool,
            logits_list,
        )

    decoded_decoder = [d[0][0] for d in decoded_decoder_out]

    self.assertListEqual(decoded_decoder, decoded_processor)
    self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_decoder)

    lm_model = processor.decoder.model_container[processor.decoder._model_key]
    self.assertEqual(lm_model.alpha, 2.0)
    self.assertEqual(lm_model.beta, 5.0)
    self.assertEqual(lm_model.unk_score_offset, -20.0)
    self.assertEqual(lm_model.score_boundary, True)
def test_decoder_download_ignores_files(self):
    processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

    language_model = processor.decoder.model_container[processor.decoder._model_key]
    path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

    downloaded_decoder_files = os.listdir(path_to_cached_dir)
    expected_decoder_files = ["alphabet.json", "language_model"]

    downloaded_decoder_files.sort()
    expected_decoder_files.sort()

    # test that only decoder relevant files from
    # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
    # are downloaded and none of the rest (e.g. README.md, ...)
    self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

def test_decoder_local_files(self):
    local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
    processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

    language_model = processor.decoder.model_container[processor.decoder._model_key]
    path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

    local_decoder_files = os.listdir(local_dir)
    expected_decoder_files = os.listdir(path_to_cached_dir)

    local_decoder_files.sort()
    expected_decoder_files.sort()

    # test that both the decoder from the hub and the local files in the cache are the same
    self.assertListEqual(local_decoder_files, expected_decoder_files)

def test_processor_from_auto_processor(self):
    processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

    raw_speech = floats_list((3, 1000))

    input_wavaveca = processor_wavaveca(raw_speech, return_tensors="np")
    input_auto = processor_auto(raw_speech, return_tensors="np")

    for key in input_wavaveca.keys():
        self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)

    logits = self._get_dummy_logits()

    decoded_wavaveca = processor_wavaveca.batch_decode(logits)
    decoded_auto = processor_auto.batch_decode(logits)

    self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)

def test_model_input_names(self):
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()

    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

    self.assertListEqual(
        processor.model_input_names,
        feature_extractor.model_input_names,
        msg="`processor` and `feature_extractor` model input names do not match",
    )

@staticmethod
def get_from_offsets(offsets, key):
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def test_offsets_integration_fast(self):
    processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = self._get_dummy_logits()[0]

    outputs = processor.decode(logits, output_word_offsets=True)
    # check Wav2Vec2CTCTokenizerOutput keys for word
    self.assertEqual(len(outputs.keys()), 4)
    self.assertTrue("text" in outputs)
    self.assertTrue("word_offsets" in outputs)
    self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

    self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

def test_offsets_integration_fast_batch(self):
    processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = self._get_dummy_logits()

    outputs = processor.batch_decode(logits, output_word_offsets=True)

    # check Wav2Vec2CTCTokenizerOutput keys for word
    self.assertEqual(len(outputs.keys()), 4)
    self.assertTrue("text" in outputs)
    self.assertTrue("word_offsets" in outputs)
    self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

    self.assertListEqual(
        [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
    )
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
@slow
@require_torch
@require_torchaudio
def test_word_time_stamp_integration(self):
    import torch

    ds = load_dataset("common_voice", "en", split="train", streaming=True)
    ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
    ds_iter = iter(ds)
    sample = next(ds_iter)

    processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
    model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

    # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
    input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

    with torch.no_grad():
        logits = model(input_values).logits.cpu().numpy()

    output = processor.decode(logits[0], output_word_offsets=True)

    time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
    word_time_stamps = [
        {
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
            "word": d["word"],
        }
        for d in output["word_offsets"]
    ]

    EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

    # output words
    self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
    self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

    # output times
    start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
    end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

    # fmt: off
    expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
    expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
    # fmt: on

    self.assertTrue(torch.allclose(start_times, expected_start_times, atol=0.01))
    self.assertTrue(torch.allclose(end_times, expected_end_times, atol=0.01))
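The batch-decoding tests above hinge on one ordering constraint: the multiprocessing pool has to be created after the processor, or the forked workers never see the KenLM state. Below is a minimal usage sketch of that pattern; it reuses the hub checkpoint from the integration test above, while the random waveforms are stand-ins for real 16 kHz audio.

# Minimal sketch: pool-backed batch decoding with Wav2Vec2ProcessorWithLM.
# Assumes transformers + pyctcdecode + kenlm are installed; the dummy audio
# below stands in for real 16 kHz waveforms.
from multiprocessing import get_context

import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

batch = [np.random.randn(16000).astype(np.float32) for _ in range(2)]  # 1 s of fake audio each
inputs = processor(batch, sampling_rate=16000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values).logits.cpu().numpy()

# The pool must be created *after* the processor so the LM is visible
# to the forked workers (the same constraint the tests above document).
with get_context("fork").Pool() as pool:
    decoded = processor.batch_decode(logits, pool)

print(decoded.text)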
| 51
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
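Since `_generate_tables` reads each file with `pd.read_pickle`, the builder round-trips pickled DataFrames. A short sketch of that round trip follows; the file name `train.pkl` is illustrative.

# Sketch: round-tripping a DataFrame through the pandas builder above.
import pandas as pd
from datasets import load_dataset

df = pd.DataFrame({"text": ["hello", "world"], "label": [0, 1]})
df.to_pickle("train.pkl")

# `load_dataset("pandas", ...)` routes through Pandas._generate_tables,
# which calls pd.read_pickle on each file and converts it to Arrow.
ds = load_dataset("pandas", data_files={"train": "train.pkl"})
print(ds["train"][0])  # {'text': 'hello', 'label': 0}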
| 687
| 0
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids

        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens), one per utf-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
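To make the id arithmetic concrete: a byte value is shifted up by the three leading special tokens (pad=0, eos=1, unk=2), and the 125 `<extra_id_*>` sentinels occupy the top of the 256 + 3 + 125 = 384-token vocabulary. A quick sketch, assuming the public `google/byt5-small` checkpoint:

# Sketch: byte-level ids in ByT5, assuming `transformers` is installed.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google/byt5-small")

ids = tok("hi").input_ids
# "h" = 0x68 = 104, "i" = 0x69 = 105; each byte is shifted by the
# 3 special tokens, and </s> (id 1) is appended:
print(ids)  # [107, 108, 1]

# vocab = 256 bytes + 3 special tokens + 125 extra_ids = 384
print(tok.vocab_size)  # 384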
| 566
|
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)

    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 60% of the samples with no answer
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
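For orientation, the stride arithmetic in `get_strided_contexts_and_ans` places window starts `max_length - doc_stride` tokens apart, so with the defaults (4096/2048) neighbouring chunks overlap by roughly half a window. A toy illustration with made-up lengths (in the real script the loop also breaks once a window ends on the [SEP] token):

# Sketch of the windowing arithmetic used above; the numbers are
# illustrative, not taken from a real Natural Questions example.
q_len = 16          # question tokens incl. the trailing [SEP]
total_len = 10_000  # question + context tokens
max_length, doc_stride = 4096, 2048

doc_start_indices = range(q_len, total_len, max_length - doc_stride)
for i in doc_start_indices:
    end_index = i + max_length - q_len
    print(f"window covers input_ids[{i}:{end_index}] (+ {q_len} question tokens prepended)")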
| 566
| 1
|