import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402

SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, loading this config raises an error.
        with self.assertRaises(ValueError):
            _ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
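

# --- Usage sketch (not part of the original test file) ---
# The register/from_pretrained round trip exercised by test_new_config_registration,
# shown outside of unittest. `CustomConfig` comes from the test fixtures above.
def _demo_custom_config_round_trip():
    AutoConfig.register("custom", CustomConfig)
    try:
        custom = CustomConfig()
        with tempfile.TemporaryDirectory() as tmp_dir:
            custom.save_pretrained(tmp_dir)
            print(type(AutoConfig.from_pretrained(tmp_dir)).__name__)  # CustomConfig
    finally:
        del CONFIG_MAPPING._extra_content["custom"]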
"""
Build a quantum full adder (QFA) for two one-bit inputs and a carry-in using
the Qiskit framework, and run it on the Aer simulator with 1000 shots.
"""
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """
    Add input_1, input_2 and carry_in on a 4-qubit circuit and return the
    measurement counts of the (sum, carry-out) qubits. An input of 2 puts the
    corresponding qubit into superposition via a Hadamard gate.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
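

# --- Cross-check sketch (not part of the original file) ---
# The two measured classical bits are the sum (qubit 2) and carry-out (qubit 3).
# For definite inputs (0 or 1) the counts collapse to the single state predicted
# by the classical full adder below.
def classical_full_adder(input_1: int, input_2: int, carry_in: int) -> tuple:
    """Return (carry_out, sum_bit) for one-bit inputs."""
    return divmod(input_1 + input_2 + carry_in, 2)


# classical_full_adder(1, 1, 1) == (1, 1), matching the dominant count of quantum_full_adder(1, 1, 1).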
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
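

# --- Usage sketch (not part of the original module; assumes the class name restored above) ---
def _demo_video_preprocess():
    processor = VivitImageProcessor()
    # a short dummy clip of 8 random RGB frames
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)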
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None

                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
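
# Example invocation (script file name and paths are placeholders, not confirmed by the source):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted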
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
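

# --- Usage sketch (not part of the original module) ---
# A ResNet-18-style variant built from the config above; the shallower stages
# and `basic` blocks are hypothetical example values, not library defaults.
def _demo_resnet18_style_config():
    config = ResNetConfig(hidden_sizes=[64, 128, 256, 512], depths=[2, 2, 2, 2], layer_type="basic")
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']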
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
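

# --- Usage sketch (not part of the original module); downloads the 20B tokenizer from the Hub ---
def _demo_gpt_neox_tokenizer():
    tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    print(tokenizer("Hello world")["input_ids"])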
"""Integration tests for the VersatileDiffusion text-to-image pipeline."""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img_pipeline(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""
Image pre-processing utilities adapted from Detectron2: resize the shortest
edge, normalize, and pad images before they are fed to a visual backbone.
"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int, int]): [min, max] range to sample the target short edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
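

# --- Usage sketch for the box helpers (not part of the original file) ---
# Scale one xyxy box back to the raw image size, then clip it to a 480x640 frame.
def _demo_box_helpers():
    boxes = torch.tensor([[10.0, 20.0, 700.0, 500.0]])
    scales_yx = torch.tensor([[0.5, 0.5]])  # per-image (y_scale, x_scale)
    boxes = _scale_box(boxes, scales_yx)
    _clip_box(boxes, (480, 640))
    print(boxes)  # tensor([[  5.,  10., 350., 250.]])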
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_gradient_checkpointing_backward_compatibility(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 543
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=32 , _a=2 , _a=3 , _a=16 , _a=[1, 2, 1] , _a=[2, 2, 4] , _a=2 , _a=2.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=True , _a=0.02 , _a=1e-5 , _a=True , _a=None , _a=True , _a=10 , _a=8 , _a=["stage1", "stage2", "stage3"] , _a=[1, 2, 3] , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = image_size
lowerCamelCase = patch_size
lowerCamelCase = num_channels
lowerCamelCase = embed_dim
lowerCamelCase = depths
lowerCamelCase = num_heads
lowerCamelCase = window_size
lowerCamelCase = mlp_ratio
lowerCamelCase = qkv_bias
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = drop_path_rate
lowerCamelCase = hidden_act
lowerCamelCase = use_absolute_embeddings
lowerCamelCase = patch_norm
lowerCamelCase = layer_norm_eps
lowerCamelCase = initializer_range
lowerCamelCase = is_training
lowerCamelCase = scope
lowerCamelCase = use_labels
lowerCamelCase = type_sequence_label_size
lowerCamelCase = encoder_stride
lowerCamelCase = out_features
lowerCamelCase = out_indices
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = MaskFormerSwinModel(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a )
lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = MaskFormerSwinBackbone(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_a ):
lowerCamelCase = ["""stem"""]
lowerCamelCase = MaskFormerSwinBackbone(config=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__UpperCamelCase = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = MaskFormerSwinModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=_a , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass
@unittest.skip("""Swin does not support feedforward chunking""" )
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs(self):
        pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def test_model_from_pretrained(self):
        pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_from_base(self):
        pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
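

# Hedged usage sketch, separate from the tests above: it shows the backbone
# contract the tests assert (one feature map per requested stage, channel
# counts exposed via `backbone.channels`). The config values are illustrative
# only and assume `MaskFormerSwinConfig` / `MaskFormerSwinBackbone` are
# importable as in recent transformers releases.
if __name__ == "__main__":
    demo_config = MaskFormerSwinConfig(
        image_size=32,
        patch_size=2,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 4, 8],
        window_size=4,
        out_features=["stage1", "stage2", "stage3"],
    )
    demo_backbone = MaskFormerSwinBackbone(demo_config).eval()
    with torch.no_grad():
        demo_outputs = demo_backbone(torch.randn(1, 3, 32, 32))
    # Channels double per stage starting from embed_dim: [16, 32, 64].
    for demo_map, demo_channels in zip(demo_outputs.feature_maps, demo_backbone.channels):
        print(demo_map.shape, demo_channels)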
| 543
| 1
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
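

# Minimal round-trip sketch of the utilities under test, runnable on its own;
# it relies only on the public accelerate APIs already imported above.
if __name__ == "__main__":
    demo_model = ModelForTest()
    with TemporaryDirectory() as demo_dir:
        offload_state_dict(demo_dir, demo_model.state_dict())
        demo_loader = OffloadedWeightsLoader(save_folder=demo_dir)
        for demo_key in demo_loader:
            # Values come back from the memory-mapped .dat files on disk.
            assert torch.equal(demo_model.state_dict()[demo_key], demo_loader[demo_key])
    print("offload round-trip OK")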
| 709
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
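

# Standalone sketch of what this metric wraps: scoring a single pair with
# google-research's `rouge_scorer`, the same dependency imported above.
if __name__ == "__main__":
    demo_scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
    demo_score = demo_scorer.score("hello there", "hello there")
    print(demo_score["rouge1"].fmeasure)  # 1.0 for identical strings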
| 521
| 0
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
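

# Hedged usage sketch: apply the watermark to a fake batch in the diffusers
# value convention of [-1, 1]; it assumes `imwatermark` is installed and uses
# the class name reconstructed above.
if __name__ == "__main__":
    demo_watermarker = StableDiffusionXLWatermarker()
    demo_images = torch.rand(2, 3, 512, 512) * 2 - 1
    demo_out = demo_watermarker.apply_watermark(demo_images)
    print(demo_out.shape)  # torch.Size([2, 3, 512, 512])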
| 257
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator d <= `digit` for which numerator/d has the longest
    recurring decimal cycle, by tracking long-division remainders until one repeats."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
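    # Worked example: among denominators 1..10, 1/7 has the longest recurring
    # decimal cycle (six digits), so this prints 7.
    print(solution(numerator=1, digit=10))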
| 257
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
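

# Distilled sketch of the device-aware seeding used in `get_dummy_inputs`
# above: MPS only accepts CPU generators, hence the branch on the device name.
# The helper name is hypothetical and not part of the test suite.
def _make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)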
| 718
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
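

# Sanity-check sketch for the latent-size helper above: 768 // 8**2 = 12 with
# no remainder, so each side maps to a 12 * 8 = 96 latent grid.
if __name__ == "__main__":
    assert downscale_height_and_width(768, 768, scale_factor=8) == (96, 96)
    # A non-multiple request is rounded up: 769 -> (769 // 64 + 1) * 8 = 104.
    assert downscale_height_and_width(769, 769, scale_factor=8) == (104, 104)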
| 79
| 0
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowercase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowercase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowercase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row)
return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
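    # One-step sanity check (hypothetical addition, not in the original file):
    # a vertical blinker flips to a horizontal one after a single generation.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]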
| 587
|
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
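

# Hedged construction sketch using the class name reconstructed above; sizes
# and cutoffs are illustrative only. With target=None the layer just returns
# log-probabilities over the full vocabulary.
if __name__ == "__main__":
    demo_layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=2)
    demo_hidden = tf.random.normal((8, 4, 32))  # (seq_len, batch, d_proj)
    demo_logprob = demo_layer(demo_hidden, None)
    print(demo_logprob.shape)  # (8, 4, 1000)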
| 243
| 0
|
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data to [0, 1] via min-max scaling
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit (sample) variance
    return [round((x - mu) / (sigma), ndigits) for x in data]
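

# Worked example (illustrative): min-max scaling maps the extremes to 0 and 1,
# while standardization recentres to zero mean and unit sample variance.
if __name__ == "__main__":
    print(normalization([2, 7, 10]))    # [0.0, 0.625, 1.0]
    print(standardization([2, 7, 10]))  # values centred on 0 with stdev 1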
| 661
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 661
| 1
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
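

if __name__ == "__main__":
    # Hypothetical direct entry point so the check can be run as a script.
    test_prim_successful_result()
    print("Prim MST check passed")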
| 41
|
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__snake_case : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__snake_case : Tuple = tokenizer.decode(sample_ids[0] )
__snake_case : str = tokenizer.batch_decode(__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Optional[Any] = 'Hello how are you'
__snake_case : Union[str, Any] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
self.assertEqual(__a , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
__snake_case : Any = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Tuple = 'Hello how are you'
__snake_case : List[str] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(__a ).input_ids , tokenizer(__a , do_phonemize=__a ).input_ids )
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : List[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
__snake_case : int = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__snake_case : Dict = tokenizer.decode(sample_ids[0] )
__snake_case : Tuple = tokenizer.batch_decode(__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
__snake_case : Union[str, Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__a )
__snake_case : Optional[int] = tokenizer.batch_decode(__a , filter_word_delimiter_token=__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def A_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Any = 'Hello how are you'
__snake_case : Optional[int] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
__snake_case : Union[str, Any] = tokenizer.decode(tokenizer(__a ).input_ids , filter_word_delimiter_token=__a )
self.assertEqual(__a , __a )
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__snake_case : Optional[int] = 'Hello how are you'
__snake_case : List[Any] = tokenizer.phonemize(__a , phonemizer_lang='en-us' )
__snake_case : Union[str, Any] = tokenizer.decode(tokenizer(__a ).input_ids , filter_word_delimiter_token=__a )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , __a )
def A_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=__a )
__snake_case : Any = 'Hello how are you'
__snake_case : Union[str, Any] = tokenizer(__a , phonemizer_lang='en-us' ).input_ids
__snake_case : Union[str, Any] = tokenizer(__a , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(__a , __a )
__snake_case : str = tokenizer.decode(__a )
__snake_case : int = tokenizer.decode(__a )
self.assertEqual(__a , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(__a , 'ɛ l o h aʊ a ʁ j u' )
    def test_case_setting(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        input_text = 'Hello how Are you'
        input_text_uncased = 'hello how are you'
        input_ids = tokenizer(input_text).input_ids
        input_ids_uncased = tokenizer(input_text_uncased).input_ids
        self.assertEqual(input_ids, input_ids_uncased)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        tokenizer.add_tokens(['!', '?'])
        tokenizer.add_special_tokens({'cls_token': '$$$'})
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token='|')
        tokenizer.add_tokens('|')
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue('text' in outputs)
        self.assertTrue('char_offsets' in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'], 'char')), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'], 'char'), ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'])
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'], 'start_offset'), [0, 1, 4, 7, 9, 11, 12, 15, 16])
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'], 'end_offset'), [1, 4, 6, 9, 10, 12, 15, 16, 17])

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token='|')

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]})
            self.assertListEqual(outputs_batch['text'], outputs_batch_2['text'])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['char_offsets'], outputs_batch_2['char_offsets'])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes')
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes')
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency')
    def test_internal_consistency(self):
        pass

    @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing')
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.')
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.')
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output['text'], str)
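
# Illustrative sketch (not part of the test suite above): the char offsets exercised by
# the offsets tests count CTC frames, so they can be converted to approximate time
# ranges. The helper name and `time_per_frame` argument are assumptions for this
# example, not part of the tokenizer API.
def char_offsets_to_seconds(char_offsets, time_per_frame):
    # each offset counts CTC frames; multiply by the frame duration to get seconds
    return [
        {'char': o['char'], 'start': o['start_offset'] * time_per_frame, 'end': o['end_offset'] * time_per_frame}
        for o in char_offsets
    ]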
"""simple docstring"""
def __lowerCAmelCase ( lowercase : float , lowercase : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(lowercase ) , lowercase )
return number - int(lowercase )
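
# Worked example (sketch): decimal_isolate(35.345, 1) computes round(35.345 - 35, 1),
# i.e. roughly 0.3, and decimal_isolate(-14.789, 3) gives roughly -0.789. Results are
# subject to binary floating-point representation, so unrounded outputs (digit_amount
# <= 0) may carry small representation error.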
if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
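
# Minimal usage sketch mirroring the slow test above (left as a comment because it
# downloads the PyTorch checkpoint and converts it to Flax):
#
#   model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
#   outputs = model(np.ones((1, 1)))
#   print(outputs.last_hidden_state.shape)  # (batch, seq_len, hidden_size)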
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest', )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype, )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
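
# Usage sketch (an assumption for illustration, not part of the module above):
# initialize and apply the resnet block on a dummy NHWC feature map with a dummy
# time embedding. The shapes below are arbitrary examples.
if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
    rng = jax.random.PRNGKey(0)
    x = jnp.ones((1, 8, 8, 32))  # NHWC, matching the conv padding above
    temb = jnp.ones((1, 128))  # arbitrary time-embedding width for the sketch
    params = block.init(rng, x, temb)
    y = block.apply(params, x, temb)
    print(y.shape)  # (1, 8, 8, 32)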
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')

        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level, current_level, f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}', )

        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
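
# Usage sketch: the verbosity helpers exercised above can be driven either from code
# or via the TRANSFORMERS_VERBOSITY environment variable (one of the keys of
# `logging.log_levels`). A minimal example, assuming `transformers` is installed:
#
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("visible at info level")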
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
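
# Synthetic usage sketch (no image file needed): apply the filter to a random uint8
# array. The output shrinks by (k_size - 1) in each dimension, since no padding is
# applied before the im2col step above.
#
#   import numpy as np
#   noise = (np.random.rand(32, 32) * 255).astype("uint8")
#   out = gaussian_filter(noise, 3, sigma=1)
#   assert out.shape == (30, 30)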
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
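
# Round-trip sketch: with the classic textbook key, "HELLO" encrypts under "KEY" to
# "RIJVS", and decrypting returns the original:
#
#   assert encrypt_message("KEY", "HELLO") == "RIJVS"
#   assert decrypt_message("KEY", "RIJVS") == "HELLO"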
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string')),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                }),
            'id': datasets.Value('int64'),
        })
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n)),
        }, features=features, )
    return dataset
@pytest.fixture(scope='session' )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp('data') / 'file.arrow')
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = (
    "    Text data.\n"
    "    Second line of data."
)
@pytest.fixture(scope='session' )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.txt'
    data = FILE_CONTENT
    with open(filename, 'w') as f:
        f.write(data)
    return filename
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] ) -> int:
    import bz2
_lowercase = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase = bytes(UpperCamelCase__ , 'utf-8' )
    with bz2.open(UpperCamelCase__ , 'wb' ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple ) -> int:
import gzip
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase = bytes(UpperCamelCase__ , 'utf-8' )
with gzip.open(UpperCamelCase__ , 'wb' ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> Optional[int]:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_lowercase = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase = bytes(UpperCamelCase__ , 'utf-8' )
        with lz4.frame.open(UpperCamelCase__ , 'wb' ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] , snake_case__ :Union[str, Any] ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_lowercase = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(UpperCamelCase__ , 'w' ) as archive:
archive.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] , snake_case__ :Union[str, Any] ) -> Optional[int]:
import tarfile
_lowercase = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(UpperCamelCase__ , 'w' ) as f:
f.add(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> List[Any]:
import lzma
_lowercase = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase = bytes(UpperCamelCase__ , 'utf-8' )
with lzma.open(UpperCamelCase__ , 'wb' ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] , snake_case__ :Optional[int] ) -> int:
import zipfile
_lowercase = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase = bytes(UpperCamelCase__ , 'utf-8' )
with zstd.open(UpperCamelCase__ , 'wb' ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict ) -> str:
_lowercase = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase = textwrap.dedent(
'\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( ) -> str:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] ) -> List[str]:
_lowercase = datasets.Dataset.from_dict(UpperCamelCase__ )
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict ) -> Any:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(UpperCamelCase__ ) ) as con:
_lowercase = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Optional[int]:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(UpperCamelCase__ , 'w' , newline='' ) as f:
_lowercase = csv.DictWriter(UpperCamelCase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] ) -> Union[str, Any]:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(UpperCamelCase__ , 'w' , newline='' ) as f:
_lowercase = csv.DictWriter(UpperCamelCase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :int ) -> str:
    import bz2
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(UpperCamelCase__ , 'rb' ) as f:
_lowercase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(UpperCamelCase__ , 'wb' ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :str , snake_case__ :Any ) -> Any:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :Union[str, Any] , snake_case__ :Optional[int] ) -> Optional[int]:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int , snake_case__ :Tuple ) -> int:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Optional[int]:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_lowercase = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(UpperCamelCase__ , 'wb' ) as f:
_lowercase = pq.ParquetWriter(UpperCamelCase__ , schema=UpperCamelCase__ )
_lowercase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCamelCase__ ) )] for k in DATA[0]} , schema=UpperCamelCase__ )
writer.write_table(UpperCamelCase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> List[Any]:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase = {'data': DATA}
with open(UpperCamelCase__ , 'w' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] ) -> str:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase = {'data': DATA_DICT_OF_LISTS}
with open(UpperCamelCase__ , 'w' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] ) -> str:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(UpperCamelCase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> Union[str, Any]:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(UpperCamelCase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> Tuple:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(UpperCamelCase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCamelCase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] ) -> Dict:
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(UpperCamelCase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCamelCase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] , snake_case__ :Any ) -> Optional[Any]:
import gzip
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(UpperCamelCase__ , 'rb' ) as orig_file:
with gzip.open(UpperCamelCase__ , 'wb' ) as zipped_file:
zipped_file.writelines(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :List[str] ) -> Union[str, Any]:
import gzip
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(UpperCamelCase__ , 'rb' ) as orig_file:
with gzip.open(UpperCamelCase__ , 'wb' ) as zipped_file:
zipped_file.writelines(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict , snake_case__ :Any , snake_case__ :Optional[Any] ) -> Optional[int]:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :str , snake_case__ :Dict , snake_case__ :Any ) -> Optional[int]:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join('nested' , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] , snake_case__ :int , snake_case__ :Dict ) -> List[Any]:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict , snake_case__ :Union[str, Any] , snake_case__ :List[str] ) -> Optional[int]:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(UpperCamelCase__ , 'w' ) as f:
f.add(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.add(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :int , snake_case__ :str , snake_case__ :int ) -> List[str]:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(UpperCamelCase__ , 'w' ) as f:
f.add(UpperCamelCase__ , arcname=os.path.join('nested' , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> Union[str, Any]:
_lowercase = ['0', '1', '2', '3']
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(UpperCamelCase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] ) -> List[Any]:
_lowercase = ['0', '1', '2', '3']
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(UpperCamelCase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] ) -> Any:
_lowercase = ['0', '1', '2', '3']
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(UpperCamelCase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int , snake_case__ :Union[str, Any] , snake_case__ :Optional[Any] ) -> Dict:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :int , snake_case__ :Any ) -> Optional[int]:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int , snake_case__ :Any , snake_case__ :Tuple ) -> int:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(UpperCamelCase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple ) -> Dict:
_lowercase = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :Tuple ) -> Tuple:
_lowercase = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(UpperCamelCase__ , 'w' ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> Dict:
_lowercase = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
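
# Consumption sketch (an assumption for illustration): pytest injects the fixtures
# above by name, so a test elsewhere in the suite could load one of the generated
# files, e.g.:
#
#   def test_csv_roundtrip(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#       assert ds.column_names == ["col_1", "col_2", "col_3"]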
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowercase : Optional[int] = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, which is not
    # compatible with enforcing deterministic algorithms.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
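
# Sketch of how `token_indices` relates to the prompt (an assumption for
# illustration): the indices point into the tokenized prompt, so they can be
# recovered from the pipeline's tokenizer:
#
#   ids = pipe.tokenizer("a cat and a frog").input_ids
#   tokens = pipe.tokenizer.convert_ids_to_tokens(ids)
#   # pick the positions of the nouns to attend to, e.g. [2, 5] for "cat" and "frog"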
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt

@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)

# =============================================================================

def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True

def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True

def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
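

# ---- usage sketch (not part of the original file) ---------------------------
# The three checks above only assume nodes exposing `.val` and `.next`; this
# minimal ListNode is a hypothetical stand-in for the project's node class.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    """Build a singly linked list from an iterable and return its head."""
    head = None
    for val in reversed(list(values)):
        node = ListNode(val)
        node.next = head
        head = node
    return head


if __name__ == "__main__":
    assert is_palindrome_stack(build_list([1, 2, 2, 1]))
    assert is_palindrome_dict(build_list("racecar"))
    assert not is_palindrome(build_list([1, 2, 3]))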

# =============================================================================

from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}

class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)

class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
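

# Usage sketch (not part of the original module; assumes an installed
# `transformers`). `attribute_map` makes the generic names resolve to XLM's
# own fields, so `hidden_size` below is an alias for `emb_dim`.
if __name__ == "__main__":
    from transformers import XLMConfig as _XLMConfig

    _cfg = _XLMConfig(n_layers=6)
    assert _cfg.hidden_size == _cfg.emb_dim
    assert _cfg.num_hidden_layers == 6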

# =============================================================================

from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}

class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout

class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
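

# Sanity sketch (not part of the original module; assumes an installed
# `transformers`): defaults mirror the checkpoints above -- block-sparse
# attention and separator token id 66.
if __name__ == "__main__":
    from transformers import BigBirdConfig as _BigBirdConfig

    _cfg = _BigBirdConfig()
    assert _cfg.attention_type == "block_sparse"
    assert _cfg.sep_token_id == 66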

# =============================================================================

from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into a single processor."""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
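

# Usage sketch (not part of the original module; the checkpoint name is an
# assumption and fetching it needs network access):
#
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     features = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="a transcript").input_ids
#
# Audio is routed to the feature extractor and text to the tokenizer; passing
# both attaches the tokenized text to the features as `labels`.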

# =============================================================================

import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}

class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index

class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
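

# Sanity sketch (not part of the original module; assumes an installed
# `transformers`): each list-valued field describes one entry per encoder
# stage, so their lengths must match num_encoder_blocks.
if __name__ == "__main__":
    from transformers import SegformerConfig as _SegformerConfig

    _cfg = _SegformerConfig()
    assert len(_cfg.depths) == len(_cfg.hidden_sizes) == _cfg.num_encoder_blocks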

# =============================================================================

import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token, make sure it is prepended to decoder_input_ids."""
        pass

# =============================================================================

def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from a keyword (spaces are kept)."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups

def create_cipher_map(key: str) -> dict[str, str]:
    """Return a mapping from the plain alphabet to the keyed cipher alphabet."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet

def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher message using the cipher map (non-letters pass through)."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher message by inverting the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())

def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()

# =============================================================================

import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()

def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)

# =============================================================================

from collections.abc import Callable

import numpy as np


def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Heun's (modified Euler) method: a second-order predictor-corrector scheme."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: one explicit Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: trapezoidal average of the slopes at both ends
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
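

# Convergence sketch (not part of the original file): Heun's method is second
# order, so a smaller step size should give a visibly smaller error. With
# dy/dx = y and y(0) = 1, the exact value at x = 1 is e.
def _demo_euler_modified() -> None:
    coarse = euler_modified(lambda x, y: y, 1.0, 0.0, 0.2, 1.0)
    fine = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    assert abs(fine[-1] - np.e) < abs(coarse[-1] - np.e)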

# =============================================================================

from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every ordering of words from word_bank that concatenates to target."""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)

# =============================================================================

import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available

if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"],
            padding=True,
            truncation=True,
            return_tensors=FRAMEWORK,
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case : List[Any] ={'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case,  # the oversized literal above is kept under its original name
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)

# =============================================================================

from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 411
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}

def sanity_checks(args):
    """A few assertions to check argument consistency before training starts."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0

def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False

def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(F"Param: {args}" )
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}" )
with open(args.data_file , '''rb''' ) as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"Loading student config from {args.student_config}" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(F"cuda:{args.local_rank}" )
logger.info('''Student loaded.''' )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F"cuda:{args.local_rank}" )
logger.info(F"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()

# =============================================================================

import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A candidate k is a perfect partition if the recovered exponent is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the smallest k whose proportion of perfect partitions drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
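

# Spot check (not part of the original file): 4**2 == 2**2 + 12, so k = 12 is
# a perfect partition, while k = 10 is not of that form.
if __name__ == "__main__":
    assert check_partition_perfect(12)
    assert not check_partition_perfect(10)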

# =============================================================================

import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Validate a Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))

# =============================================================================

from string import ascii_uppercase

# forward and reverse lookups between letters and their 0-25 indices
char_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_char = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the keyword until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt message with the stretched key (spaces pass through)."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (char_to_index[letter] - char_to_index[key_new[i]]) % 26
            i += 1
            encrypted += index_to_char[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt cipher_txt with the stretched key."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_index[letter] + char_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_char[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()

# =============================================================================

from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

# =============================================================================

import random


def rabin_miller(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True

def is_prime_low_num(num: int) -> bool:
    """Trial division against small primes first, then fall back to rabin_miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)

def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))

# =============================================================================

from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()

# =============================================================================

import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : str = 3_8_4
if "tiny" in model_name:
UpperCAmelCase_ : Any = [3, 3, 9, 3]
UpperCAmelCase_ : Union[str, Any] = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
UpperCAmelCase_ : Optional[int] = [3, 3, 2_7, 3]
UpperCAmelCase_ : Dict = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
UpperCAmelCase_ : List[str] = [3, 3, 2_7, 3]
UpperCAmelCase_ : Optional[Any] = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
UpperCAmelCase_ : Any = 5_1_2
if "large" in model_name:
UpperCAmelCase_ : Optional[int] = [3, 3, 2_7, 3]
UpperCAmelCase_ : Dict = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
UpperCAmelCase_ : int = 7_6_8
if "xlarge" in model_name:
UpperCAmelCase_ : Any = [3, 3, 2_7, 3]
UpperCAmelCase_ : int = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
UpperCAmelCase_ : Optional[int] = 1_0_2_4
# set label information
UpperCAmelCase_ : Dict = 1_5_0
UpperCAmelCase_ : Any = '''huggingface/label-files'''
UpperCAmelCase_ : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase_ : Optional[int] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
    UpperCAmelCase_ : Union[str, Any] = {int(k): v for k, v in idalabel.items()}
UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : int = ConvNextConfig(
depths=__lowercase , hidden_sizes=__lowercase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
UpperCAmelCase_ : List[Any] = UperNetConfig(
        backbone_config=__lowercase , auxiliary_in_channels=__lowercase , num_labels=__lowercase , id2label=__lowercase , label2id=__lowercase , )
return config
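# The config builder above follows the usual hub recipe for label metadata:
# download a JSON id->label file, coerce the keys back to int (JSON keys are
# always strings), then derive the inverse map. That step in isolation,
# assuming network access to the repo and filename used above:
import json
from huggingface_hub import hf_hub_download
path = hf_hub_download("huggingface/label-files", "ade20k-id2label.json", repo_type="dataset")
with open(path, "r") as f:
    id2label = {int(k): v for k, v in json.load(f).items()}
label2id = {v: k for k, v in id2label.items()}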
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : str = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = dct.pop(__lowercase )
UpperCAmelCase_ : List[Any] = val
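# Applying a rename pair is a pure dict rewrite: pop the tensor under the old
# key and reinsert it under the new one, which is exactly what the helper
# above does. A toy run of that mechanism (keys are illustrative):
state_dict = {"backbone.norm0.weight": 0.5}
for src, dest in [("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")]:
    state_dict[dest] = state_dict.pop(src)
assert "backbone.norm0.weight" not in state_dict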
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : str = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
UpperCAmelCase_ : List[str] = model_name_to_url[model_name]
UpperCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict''']
UpperCAmelCase_ : Optional[Any] = get_upernet_config(__lowercase )
UpperCAmelCase_ : List[Any] = UperNetForSemanticSegmentation(__lowercase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase_ : str = state_dict.pop(__lowercase )
if "bn" in key:
UpperCAmelCase_ : Tuple = key.replace('''bn''' , '''batch_norm''' )
UpperCAmelCase_ : Optional[int] = val
# rename keys
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
model.load_state_dict(__lowercase )
# verify on image
UpperCAmelCase_ : Optional[int] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
UpperCAmelCase_ : List[Any] = SegformerImageProcessor()
UpperCAmelCase_ : int = processor(__lowercase , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(__lowercase )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase_ : Optional[Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase_ : Union[str, Any] = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase_ : List[Any] = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase_ : Dict = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase_ : Any = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowercase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__lowercase )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
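# The conversion is validated by comparing a small patch of output logits
# against hard-coded reference values within an absolute tolerance, so minor
# floating-point drift passes while a broken weight mapping fails loudly.
# The same check in isolation (values are illustrative):
import torch
expected = torch.tensor([[-8.8110, -8.8110, -8.6521]])
actual = expected + 1e-5  # tiny numerical deviation, well under atol
assert torch.allclose(actual, expected, atol=1e-4)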
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
            '''f1''': f1_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
            '''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this; it defaults to 0, meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
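# The dataloader hook in the class above packs precomputed feature tensors
# into a TensorDataset, which yields aligned tuples (input_ids, attention
# mask, token_type_ids, labels) per index. A minimal sketch of that
# composition (shapes are illustrative):
import torch
from torch.utils.data import DataLoader, TensorDataset
input_ids = torch.randint(0, 100, (8, 16))
attention_mask = torch.ones(8, 16, dtype=torch.long)
labels = torch.zeros(8, 16, dtype=torch.long)
loader = DataLoader(TensorDataset(input_ids, attention_mask, labels), batch_size=4)
for batch in loader:
    assert batch[0].shape == (4, 16)  # each batch keeps the tensors aligned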
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
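# Evaluation in the class above rebuilds label strings while skipping pad
# positions, then scores with seqeval, which evaluates at the entity level
# (a partially matched span counts as wrong) rather than per token. A small
# standalone call, assuming seqeval is installed (tags are illustrative):
from seqeval.metrics import f1_score, precision_score, recall_score
y_true = [["B-PER", "I-PER", "O"], ["B-LOC", "O", "O"]]
y_pred = [["B-PER", "I-PER", "O"], ["O", "O", "O"]]
print(precision_score(y_true, y_pred), recall_score(y_true, y_pred), f1_score(y_true, y_pred))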
| 641
| 1
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
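# The predicate above is a pure codepoint-range test over the CJK blocks. A
# trimmed version covering only the basic CJK Unified Ideographs block, with
# a quick sanity check (example characters are illustrative):
def in_basic_cjk_block(cp: int) -> bool:
    return 0x4E00 <= cp <= 0x9FFF
assert in_basic_cjk_block(ord("中")) and not in_basic_cjk_block(ord("A"))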
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
    # return 1 if every character in the word is a CJK character (e.g. '身高', '神'), else 0 (e.g. '180')
for char in word:
lowerCAmelCase = ord(_UpperCAmelCase )
if not _is_chinese_char(_UpperCAmelCase ):
return 0
return 1
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
lowerCAmelCase = set()
for token in tokens:
lowerCAmelCase = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
if chinese_word:
word_set.add(_UpperCAmelCase )
lowerCAmelCase = list(_UpperCAmelCase )
return word_list
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : set ):
if not chinese_word_set:
return bert_tokens
lowerCAmelCase = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
lowerCAmelCase = bert_tokens
lowerCAmelCase ,lowerCAmelCase = 0, len(_UpperCAmelCase )
while start < end:
lowerCAmelCase = True
if is_chinese(bert_word[start] ):
lowerCAmelCase = min(end - start , _UpperCAmelCase )
for i in range(_UpperCAmelCase , 1 , -1 ):
lowerCAmelCase = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCAmelCase = '##' + bert_word[j]
lowerCAmelCase = start + i
lowerCAmelCase = False
break
if single_word:
start += 1
return bert_word
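# The routine above greedily matches the longest LTP word starting at each
# position and prefixes every non-initial BERT piece with '##', the marker
# whole-word masking uses to group pieces into one maskable unit. A compact
# re-implementation of that idea, without the max-word-length cap used above
# (tokens are illustrative):
def mark_subwords(pieces, words):
    out, start = list(pieces), 0
    while start < len(out):
        matched = False
        for i in range(len(out) - start, 1, -1):  # try longest match first
            if "".join(pieces[start:start + i]) in words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return out
assert mark_subwords(["身", "高", "是"], {"身高"}) == ["身", "##高", "是"]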
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : LTP , _UpperCAmelCase : BertTokenizer ):
lowerCAmelCase = []
for i in range(0 , len(_UpperCAmelCase ) , 100 ):
lowerCAmelCase = ltp_tokenizer.seg(lines[i : i + 100] )[0]
lowerCAmelCase = [get_chinese_word(_UpperCAmelCase ) for r in res]
ltp_res.extend(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase = []
for i in range(0 , len(_UpperCAmelCase ) , 100 ):
lowerCAmelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase = []
for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase = []
for id in input_ids:
lowerCAmelCase = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
input_tokens.append(_UpperCAmelCase )
lowerCAmelCase = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_UpperCAmelCase ):
if token[:2] == "##":
lowerCAmelCase = token[2:]
# save chinese tokens' pos
if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
ref_id.append(_UpperCAmelCase )
ref_ids.append(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
return ref_ids
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
lowerCAmelCase = f.readlines()
    lowerCAmelCase = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # avoid delimiters like '\u2029'
lowerCAmelCase = LTP(args.ltp ) # faster in GPU device
lowerCAmelCase = BertTokenizer.from_pretrained(args.bert )
lowerCAmelCase = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
lowerCAmelCase = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(_UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
__UpperCamelCase : Optional[int] = parser.parse_args()
main(args)
| 4
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_pix2struct'''] = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_pix2struct'''] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
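# The pattern above registers a _LazyModule in sys.modules so the heavy,
# torch- and vision-backed submodules are only imported on first attribute
# access, while the TYPE_CHECKING branch keeps static imports visible to
# type checkers. A minimal stand-in with the same shape (module and class
# names are illustrative, not a real transformers module):
import sys
from transformers.utils import _LazyModule
_demo_structure = {"configuration_foo": ["FooConfig"]}
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _demo_structure)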
| 164
| 0
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __lowerCAmelCase ( __magic_name__ ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
return max(metric_fn(__magic_name__ , __magic_name__ ) for gt in ground_truths )
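# Taking the max of a metric over all gold answers is the standard SQuAD-style
# trick for questions with several acceptable strings: the prediction only has
# to match one of them. The same computation in isolation (metric and strings
# are illustrative):
def exact(pred, gold):
    return float(pred.strip().lower() == gold.strip().lower())
golds = ["Paris", "Paris, France"]
assert max(exact("paris", gt) for gt in golds) == 1.0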
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
_lowercase: Union[str, Any] = [line.strip() for line in open(__magic_name__ , "r" ).readlines()]
_lowercase: Optional[Any] = []
if args.gold_data_mode == "qa":
_lowercase: Optional[Any] = pd.read_csv(__magic_name__ , sep="\t" , header=__magic_name__ )
for answer_list in data[1]:
_lowercase: Any = ast.literal_eval(__magic_name__ )
answers.append(__magic_name__ )
else:
_lowercase: Any = [line.strip() for line in open(__magic_name__ , "r" ).readlines()]
_lowercase: Dict = [[reference] for reference in references]
_lowercase: Union[str, Any] = 0
for prediction, ground_truths in zip(__magic_name__ , __magic_name__ ):
total += 1
em += metric_max_over_ground_truths(__magic_name__ , __magic_name__ , __magic_name__ )
fa += metric_max_over_ground_truths(__magic_name__ , __magic_name__ , __magic_name__ )
_lowercase: Tuple = 100.0 * em / total
_lowercase: str = 100.0 * fa / total
logger.info(f"F1: {fa:.2f}" )
logger.info(f"EM: {em:.2f}" )
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
_lowercase: int = args.k
_lowercase: Union[str, Any] = [line.strip() for line in open(__magic_name__ , "r" ).readlines()]
_lowercase: Dict = [line.strip() for line in open(__magic_name__ , "r" ).readlines()]
_lowercase: Union[str, Any] = 0
for hypo, reference in zip(__magic_name__ , __magic_name__ ):
_lowercase: Any = set(hypo.split("\t" )[:k] )
_lowercase: List[Any] = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_lowercase: Optional[int] = 100.0 * em / total
logger.info(f"Precision@{k}: {em: .2f}" )
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
def strip_title(__magic_name__ ):
if title.startswith("\"" ):
_lowercase: Any = title[1:]
if title.endswith("\"" ):
_lowercase: List[str] = title[:-1]
return title
_lowercase: str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__magic_name__ , return_tensors="pt" , padding=__magic_name__ , truncation=__magic_name__ , )["input_ids"].to(args.device )
_lowercase: str = rag_model.rag.question_encoder(__magic_name__ )
_lowercase: str = question_enc_outputs[0]
_lowercase: List[str] = rag_model.retriever(
__magic_name__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
_lowercase: List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_lowercase: List[Any] = []
for docs in all_docs:
_lowercase: Optional[int] = [strip_title(__magic_name__ ) for title in docs["title"]]
provenance_strings.append("\t".join(__magic_name__ ) )
return provenance_strings
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
with torch.no_grad():
_lowercase: Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__magic_name__ , return_tensors="pt" , padding=__magic_name__ , truncation=__magic_name__ )
_lowercase: Optional[int] = inputs_dict.input_ids.to(args.device )
_lowercase: Optional[Any] = inputs_dict.attention_mask.to(args.device )
_lowercase: Any = rag_model.generate( # rag_model overwrites generate
__magic_name__ , attention_mask=__magic_name__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__magic_name__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_lowercase: Any = rag_model.retriever.generator_tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
if args.print_predictions:
for q, a in zip(__magic_name__ , __magic_name__ ):
logger.info("Q: {} - A: {}".format(__magic_name__ , __magic_name__ ) )
return answers
def __lowerCAmelCase ( ):
_lowercase: Any = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__magic_name__ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=__magic_name__ , choices=["exact", "compressed", "legacy"] , type=__magic_name__ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=__magic_name__ , type=__magic_name__ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=__magic_name__ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__magic_name__ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=__magic_name__ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=__magic_name__ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=__magic_name__ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=__magic_name__ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=__magic_name__ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=__magic_name__ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=5_0 , type=__magic_name__ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
_lowercase: Dict = parser.parse_args()
_lowercase: Any = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: Dict = {}
if args.model_type is None:
_lowercase: Optional[int] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
_lowercase: List[str] = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
_lowercase: Union[str, Any] = args.n_docs
if args.index_name is not None:
_lowercase: Tuple = args.index_name
if args.index_path is not None:
_lowercase: str = args.index_path
else:
_lowercase: Optional[int] = BartForConditionalGeneration
_lowercase: Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , __magic_name__ )
_lowercase: Optional[Any] = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    _lowercase: Union[str, Any] = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(__magic_name__ , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(__magic_name__ ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
_lowercase: Optional[Any] = RagRetriever.from_pretrained(__magic_name__ , **__magic_name__ )
_lowercase: List[str] = model_class.from_pretrained(__magic_name__ , retriever=__magic_name__ , **__magic_name__ )
model.retriever.init_retrieval()
else:
_lowercase: Dict = model_class.from_pretrained(__magic_name__ , **__magic_name__ )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
_lowercase: List[Any] = []
for line in tqdm(__magic_name__ ):
questions.append(line.strip() )
if len(__magic_name__ ) == args.eval_batch_size:
_lowercase: Tuple = evaluate_batch_fn(__magic_name__ , __magic_name__ , __magic_name__ )
preds_file.write("\n".join(__magic_name__ ) + "\n" )
preds_file.flush()
_lowercase: int = []
if len(__magic_name__ ) > 0:
_lowercase: Optional[Any] = evaluate_batch_fn(__magic_name__ , __magic_name__ , __magic_name__ )
preds_file.write("\n".join(__magic_name__ ) )
preds_file.flush()
score_fn(__magic_name__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Any = get_args()
main(args)
| 206
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 206
| 1
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case : Dict = logging.getLogger(__name__)
__snake_case : Any = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__snake_case : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default=lowercase_ , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowercase_ )} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = field(
default=lowercase_ , metadata={'help': 'The input training data file (a text file).'} )
__snake_case = field(
default=lowercase_ , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
__snake_case = field(default=lowercase_ , metadata={'help': 'Whether ot not to use whole word mask.'} )
__snake_case = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
__snake_case = field(
default=lowercase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __lowerCamelCase ( __snake_case : DataTrainingArguments, __snake_case : PreTrainedTokenizer, __snake_case : bool = False, __snake_case : Optional[str] = None, ) -> str:
"""simple docstring"""
def _dataset(__snake_case : Tuple, __snake_case : Dict=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=__snake_case, file_path=__snake_case, block_size=args.block_size, ref_path=__snake_case, )
return LineByLineTextDataset(tokenizer=__snake_case, file_path=__snake_case, block_size=args.block_size )
else:
return TextDataset(
tokenizer=__snake_case, file_path=__snake_case, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=__snake_case, )
if evaluate:
return _dataset(args.eval_data_file, args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(__snake_case ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file, args.train_ref_file )
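# When --train_data_files is a glob, every matched file becomes its own
# dataset and ConcatDataset chains them so a single index space spans all
# shards. The same composition in miniature (data is illustrative):
import torch
from torch.utils.data import ConcatDataset, TensorDataset
shards = [TensorDataset(torch.arange(4)), TensorDataset(torch.arange(4, 10))]
combined = ConcatDataset(shards)
assert len(combined) == 10 and combined[7][0].item() == 7  # index 7 falls in the second shard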
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A__ , A__ , A__ : Any =parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", __snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
A__ : Any =AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
A__ : Optional[int] =AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
else:
A__ : Optional[Any] =CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
A__ : Tuple =AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
A__ : Optional[int] =AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
A__ : Dict =AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=__snake_case, cache_dir=model_args.cache_dir, )
else:
logger.info("""Training new model from scratch""" )
A__ : Any =AutoModelWithLMHead.from_config(__snake_case )
model.resize_token_embeddings(len(__snake_case ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
A__ : int =tokenizer.max_len
# Our input block size will be the max possible for the model
else:
A__ : Any =min(data_args.block_size, tokenizer.max_len )
# Get datasets
A__ : Optional[int] =(
get_dataset(__snake_case, tokenizer=__snake_case, cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
A__ : Tuple =(
get_dataset(__snake_case, tokenizer=__snake_case, evaluate=__snake_case, cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
A__ : Tuple =DataCollatorForPermutationLanguageModeling(
tokenizer=__snake_case, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
else:
if data_args.mlm and data_args.whole_word_mask:
A__ : str =DataCollatorForWholeWordMask(
tokenizer=__snake_case, mlm_probability=data_args.mlm_probability )
else:
A__ : Optional[int] =DataCollatorForLanguageModeling(
tokenizer=__snake_case, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A__ : Optional[Any] =Trainer(
model=__snake_case, args=__snake_case, data_collator=__snake_case, train_dataset=__snake_case, eval_dataset=__snake_case, prediction_loss_only=__snake_case, )
# Training
if training_args.do_train:
A__ : Dict =(
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__snake_case )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ : Dict ={}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A__ : Dict =trainer.evaluate()
A__ : Optional[int] =math.exp(eval_output["""eval_loss"""] )
A__ : Optional[int] ={"""perplexity""": perplexity}
A__ : Union[str, Any] =os.path.join(training_args.output_dir, """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(__snake_case, """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""", __snake_case, str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(__snake_case )
return results
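# The eval branch above reports perplexity, which for a language model is
# simply the exponential of the mean cross-entropy eval loss. Worked
# numerically (loss value is illustrative):
import math
eval_loss = 2.0
perplexity = math.exp(eval_loss)  # ~7.389; lower loss => lower perplexity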
def __lowerCamelCase ( __snake_case : Dict ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 215
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__snake_case : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__snake_case : str = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
A__ : List[Any] =self.transformer_dir
shutil.copy(
os.path.join(lowerCAmelCase_ , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
A__ : Optional[int] ="""src/transformers"""
shutil.rmtree(self.transformer_dir )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=None ) -> Dict:
'''simple docstring'''
A__ : Optional[Any] =comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
A__ : List[str] =comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        A__ : int =black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
A__ : Any =black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ )
A__ : Optional[Any] =os.path.join(self.transformer_dir , """new_code.py""" )
with open(lowerCAmelCase_ , """w""" , newline="""\n""" ) as f:
f.write(lowerCAmelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ )
with open(lowerCAmelCase_ , """r""" ) as f:
self.assertTrue(f.read() , lowerCAmelCase_ )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
A__ : Tuple =check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , lowerCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , lowerCAmelCase_ ) , )
# Copy consistency with a really long name
A__ : List[str] ="""TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub("""Bert""" , lowerCAmelCase_ , lowerCAmelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , lowerCAmelCase_ , overwrite_result=re.sub("""Bert""" , """TestModel""" , lowerCAmelCase_ ) , )
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
A__ : int =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
A__ : Dict =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
A__ : List[str] =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
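        # Note (added): convert_to_localized_md returns a
        # (models_are_in_sync, converted_md) pair; the assertions above first
        # exercise an out-of-sync localized list, then a synchronized one.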
| 215
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_lowerCamelCase =logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """simple docstring"""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        # torch dtypes are not JSON-serializable; store e.g. "float32" instead
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
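# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of the config's JSON round-trip, assuming bitsandbytes>=0.39.0
# is installed (required by post_init when load_in_4bit=True); the file name is
# hypothetical.
#
#   cfg = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   print(cfg.to_json_string())          # only non-default fields, via to_diff_dict()
#   cfg.to_json_file("quant_config.json")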
| 721
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
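# --- Illustrative usage (added; not part of the original test file) ---
# A stand-alone sketch of the pipeline under test, using a checkpoint that
# already appears above:
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base")
#   print(unmasker("My name is <mask>", top_k=2))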
| 252
| 0
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f'{class_data_dir}/images', exist_ok=True)
    if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f'{class_data_dir}/caption.txt', "w") as f1, open(f'{class_data_dir}/urls.txt', "w") as f2, open(
        f'{class_data_dir}/images.txt', "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f'{class_data_dir}/images/{total}.jpg', "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f'{class_data_dir}/images/{total}.jpg' + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
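# Example invocation (added for illustration; the script name, prompt and paths
# are placeholders):
#   python retrieve.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./class_images --num_class_images 200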
| 457
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.")
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.", )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.", )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.", )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    '''simple docstring'''
    def fn(examples):
        return tokenizer(examples["text"])
    return fn
def get_serialized_examples(tokenized_data):
    '''simple docstring'''
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    '''simple docstring'''
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f'Limiting the dataset to {args.limit} entries.')
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f'dataset-{shard_count}-{records_containing}.tfrecord')
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f'split-{args.split}-records-count.txt', "w") as f:
        print(f'Total {args.split} records: {total_records}', file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
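# A minimal read-back sketch (added; not part of the original script). The
# feature names match those serialized in get_serialized_examples above; the
# shard file name is a placeholder.
#
#   feature_description = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   ds = tf.data.TFRecordDataset(["dataset-0-1000.tfrecord"])
#   ds = ds.map(lambda rec: tf.io.parse_single_example(rec, feature_description))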
| 457
| 1
|
def z_function(input_str):
    """simple docstring"""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i, z_result, s):
    """simple docstring"""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern, input_str):
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
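# Worked example (added for illustration):
#   z_function("abab") == [0, 0, 2, 0]   # the prefix "ab" recurs at index 2
#   find_pattern("ab", "abab") == 2      # the pattern occurs twice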
| 716
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs, ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation.")
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        '''simple docstring'''
        return len(self.sp_model)

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text):
        '''simple docstring'''
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        '''simple docstring'''
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 484
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']


def pytest_collection_modifyitems(config, items):
    """simple docstring"""
    # Mark tests without an explicit marker as unit tests.
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """simple docstring"""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """simple docstring"""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """simple docstring"""
    # Don't report download counts from tests (value inferred: False).
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """simple docstring"""
    # Silence sqlalchemy 2.0 deprecation warnings (value inferred: True).
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 55
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
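# Illustrative note (added): with the guards above, a scheduler whose optional
# backend is missing is replaced by a dummy object that raises only when
# instantiated, so importing the subpackage itself never fails, e.g.
#
#   from diffusers.schedulers import LMSDiscreteScheduler  # real class only if scipy is installed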
| 104
| 0
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    '''simple docstring'''
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
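# Example invocation (added for illustration; script name and paths are
# placeholders):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin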
| 472
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
a = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size for training."} )
a = field(default=2 , metadata={"help": "Batch size for evaluation."} )
a = field(default=0.1 , metadata={"help": "Value of weight decay."} )
a = field(
default=1_00_00 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
a = field(default=2e-4 , metadata={"help": "Learning rate fo training."} )
a = field(default="cosine" , metadata={"help": "Learning rate."} )
a = field(
default=7_50 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
a = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
a = field(
default=A__ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
a = field(default=5_00_00 , metadata={"help": "Maximum number of training steps."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=10_24 , metadata={"help": "Sequence lengths used for training."} )
a = field(default=1 , metadata={"help": "Training seed."} )
a = field(
default=10_24 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
a = field(
default=A__ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
a = field(default=A__ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class EvaluationArguments:
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=10_24 , metadata={"help": "Length of sequences to be evaluated."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class HumanEvalArguments:
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(default=A__ , metadata={"help": "Number of workers used for code evaluation."} )
a = field(
default=A__ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
a = field(
default=A__ , metadata={"help": "Sample from the language model's output distribution."} )
a = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
a = field(default=2_56 , metadata={"help": "Maximum number of newly generated tokens."} )
a = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
a = field(default=0.9_5 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
a = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
a = field(
default=2_00 , metadata={"help": "Number of completions to generate for each sample."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
a = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class PreprocessingArguments:
"""simple docstring"""
a = field(
default=A__ , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
a = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
a = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
a = field(
default=10_00_00 , metadata={"help": "Number of files to save per JSON output file."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(
default=10_00 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
a = field(
default=1_00 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
a = field(
default=0.2_5 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
a = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
a = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
a = field(
default=A__ , metadata={"help": "If True, near-duplicate samples are removed."} )
a = field(
default=0.8_5 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class TokenizerTrainingArguments:
"""simple docstring"""
a = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
a = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(default=20_00_00 , metadata={"help": "Number of examples to train tokenizer on."} )
a = field(
default=3_27_68 , metadata={"help": "Number of examples to train the tokenizer on."} )
a = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
a = field(default=A__ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class PretokenizationArguments:
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
a = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
a = field(default=A__ , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class InitializationArguments:
"""simple docstring"""
a = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
a = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
a = field(default=A__ , metadata={"help": "Push saved tokenizer to the hub."} )
| 472
| 1
|
def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(99)}")
| 89
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        """simple docstring"""
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        """simple docstring"""
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all.")
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        """simple docstring"""
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        """simple docstring"""
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        """simple docstring"""
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        """simple docstring"""
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def lowerCamelCase__ ( a : StoppingCriteriaList , a : int ) -> StoppingCriteriaList:
"""simple docstring"""
a__ :Tuple = stopping_criteria.max_length
a__ :str = deepcopy(a )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , a )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=a ) )
return new_stopping_criteria
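
A minimal usage sketch (added for illustration, not part of the original module): it exercises the criteria directly with placeholder tensors, without running a real model.

# Illustrative only: shapes and values below are arbitrary placeholders.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8), MaxTimeCriteria(max_time=30.0)])
dummy_input_ids = torch.ones((1, 8), dtype=torch.long)  # pretend 8 tokens were generated
dummy_scores = torch.zeros((1, 100))                    # placeholder next-token scores
assert criteria(dummy_input_ids, dummy_scores)          # max_length reached -> stop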
| 395
| 0
|
"""Binary tree traversals: recursive and iterative pre-, in-, post- and level-order."""
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable in practice: the loop always returns once input stops


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level-order traversal that prints each level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
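
A small non-interactive sketch (illustrative addition, since build_tree() reads from stdin): build a three-node tree by hand and traverse it.

# Illustrative only: bypasses the interactive build_tree().
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
pre_order(root)  # prints: 1,2,3,
in_order(root)   # prints: 2,1,3,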
| 710
|
"""Utilities for chunking a layer's computation over its batch dimensions to save memory."""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
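
A minimal sketch (illustrative; the toy layer and shapes are assumptions, not from the original file) showing chunk_layer splitting work over flattened batch dimensions and reassembling the result.

# Illustrative only:
def _toy_layer(x: torch.Tensor) -> torch.Tensor:
    # any per-element computation works here
    return x * 2

_x = torch.randn(4, 6, 8)  # batch dims (4, 6), feature dim 8
_full = _toy_layer(_x)
_chunked = chunk_layer(_toy_layer, {"x": _x}, chunk_size=5, no_batch_dims=2)
assert torch.allclose(_full, _chunked)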
| 425
| 0
|
"""simple docstring"""
def snake_case ( A__ ):
UpperCAmelCase_ : int = len(A__ )
for i in range(A__ ):
for j in range(i + 1 ,A__ ):
if numbers[j] < numbers[i]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase_ = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
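
A quick illustrative check (not part of the original script): the sort runs in O(n^2), like selection sort but swapping on every inversion it finds.

# Illustrative only:
assert exchange_sort([3, 1, 2]) == [1, 2, 3]
assert exchange_sort([]) == []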
| 95
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def snake_case ( A__ ):
UpperCAmelCase_ : int = int(number**0.5 )
return number == sq * sq
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ : int = x_den * y_den * z_den
UpperCAmelCase_ : int = gcd(A__ ,A__ )
top //= hcf
bottom //= hcf
return top, bottom
def snake_case ( A__ = 35 ):
UpperCAmelCase_ : set = set()
UpperCAmelCase_ : int
UpperCAmelCase_ : Fraction = Fraction(0 )
UpperCAmelCase_ : tuple[int, int]
for x_num in range(1 ,order + 1 ):
for x_den in range(x_num + 1 ,order + 1 ):
for y_num in range(1 ,order + 1 ):
for y_den in range(y_num + 1 ,order + 1 ):
# n=1
UpperCAmelCase_ : Optional[int] = x_num * y_den + x_den * y_num
UpperCAmelCase_ : List[Any] = x_den * y_den
UpperCAmelCase_ : Tuple = gcd(A__ ,A__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ : int = add_three(
A__ ,A__ ,A__ ,A__ ,A__ ,A__ )
unique_s.add(A__ )
# n=2
UpperCAmelCase_ : Optional[Any] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ : Dict = x_den * x_den * y_den * y_den
if is_sq(A__ ) and is_sq(A__ ):
UpperCAmelCase_ : int = int(sqrt(A__ ) )
UpperCAmelCase_ : Any = int(sqrt(A__ ) )
UpperCAmelCase_ : str = gcd(A__ ,A__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ : Tuple = add_three(
A__ ,A__ ,A__ ,A__ ,A__ ,A__ )
unique_s.add(A__ )
# n=-1
UpperCAmelCase_ : Optional[int] = x_num * y_num
UpperCAmelCase_ : Dict = x_den * y_num + x_num * y_den
UpperCAmelCase_ : Union[str, Any] = gcd(A__ ,A__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ : List[Any] = add_three(
A__ ,A__ ,A__ ,A__ ,A__ ,A__ )
unique_s.add(A__ )
# n=2
UpperCAmelCase_ : Optional[Any] = x_num * x_num * y_num * y_num
UpperCAmelCase_ : Dict = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(A__ ) and is_sq(A__ ):
UpperCAmelCase_ : str = int(sqrt(A__ ) )
UpperCAmelCase_ : int = int(sqrt(A__ ) )
UpperCAmelCase_ : Optional[Any] = gcd(A__ ,A__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ : Optional[int] = add_three(
A__ ,A__ ,A__ ,A__ ,A__ ,A__ )
unique_s.add(A__ )
for num, den in unique_s:
total += Fraction(A__ ,A__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
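
A hand-checkable sketch of the helpers (illustrative addition): 1/2 + 1/3 + 1/6 = 1, so the reduced sum is (1, 1).

# Illustrative only: add_three(x_num, x_den, y_num, y_den, z_num, z_den)
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)  # 1/2 + 1/3 + 1/6 == 1/1
assert is_sq(36) and not is_sq(35)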
| 95
| 1
|
"""Convert a positive decimal integer to its representation in any base from 2 to 36."""
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1_000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
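
Two quick illustrative checks (in addition to the exhaustive round-trip loop above):

# Illustrative only:
assert decimal_to_any(255, 16) == "FF"
assert decimal_to_any(5, 2) == "101"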
| 715
|
"""Distributed RAG retrieval backed by Ray actors."""
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
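
A rough usage sketch (illustrative; the actor count, model name, and extra kwargs are placeholder assumptions): retrieval workers are plain Ray actors wrapping RayRetriever, and the retriever fans retrieve() calls out to them.

# Illustrative only; requires a running Ray cluster:
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers
# )
# retriever.init_retrieval()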
| 617
| 0
|
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 30
|
import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding one prompt per task and copy."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task and collect them per task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
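
An illustrative invocation (the script name and flag values are placeholder assumptions; see HumanEvalArguments for the authoritative option set):

# Illustrative only:
# accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot --do_sample True \
#     --temperature 0.2 --top_p 0.95 --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1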
| 30
| 1
|
"""simple docstring"""
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 24
|
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
_UpperCAmelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCAmelCase ( A : int ):
'''simple docstring'''
return sum(int(A ) for c in str(abs(A ) ) )
def UpperCAmelCase ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(A : Callable , A : int ) -> None:
_UpperCAmelCase = f'{func.__name__}({value})'
_UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' )
print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(A , A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
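
A short usage sketch (illustrative; treat the exact inputs as placeholders): __call__ concatenates the question and title with each passage text, one row per passage.

# Illustrative only:
# tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
# encodings = tokenizer(
#     questions=["What is love?"],
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#     return_tensors="pt",
# )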
| 398
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special ([eos]/[pad]/...) else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
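
A minimal sketch (illustrative) of loading and using the fast tokenizer:

# Illustrative only:
# tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
# batch = tokenizer(["PEGASUS is a summarization model."], return_tensors="pt")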
| 209
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_a : Dict = None
_a : str = logging.get_logger(__name__)
_a : List[str] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_a : Union[str, Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
_a : Optional[int] = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
_a : Optional[int] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = VOCAB_FILES_NAMES
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_VOCAB_FILES_MAP
A = ['''input_ids''', '''attention_mask''']
A = MBartTokenizer
A = []
A = []
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
vocab_file=_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , src_lang=_lowerCAmelCase , tgt_lang=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
lowerCAmelCase__ :Tuple = vocab_file
lowerCAmelCase__ :List[Any] = False if not self.vocab_file else True
lowerCAmelCase__ :int = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self):
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang):
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs):
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, tgt_lang):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
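# A minimal usage sketch for the MBart-style multilingual fast tokenizer defined above
# (empty prefix tokens, [</s>, <lang_code>] suffix, "en_XX" default). The checkpoint
# name is an assumption for illustration; any MBart-style checkpoint that ships
# FAIRSEQ_LANGUAGE_CODES should behave the same way.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-cc25", src_lang="en_XX")
    model_inputs = tokenizer("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
    # Switching src_lang re-runs set_src_lang_special_tokens(), changing the suffix lang code.
    tokenizer.src_lang = "ro_RO"
    # _build_translation_inputs also records the target language as `forced_bos_token_id`.
    translation_inputs = tokenizer._build_translation_inputs(
        "UN Chief Says There Is No Plan to Stop War", return_tensors="pt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    print(translation_inputs["forced_bos_token_id"])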
| 703
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim(self):
        '''simple docstring'''
        return 32

    @property
    def block_out_channels_0(self):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        '''simple docstring'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy")
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, generator=generator, num_inference_steps=100, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
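# A minimal end-to-end sketch of the two-stage Kandinsky 2.2 flow exercised by the slow
# test above: the prior pipeline turns a prompt into image embeddings, and the decoder
# pipeline renders them. Checkpoint names are the same ones already used in the test.
if __name__ == "__main__":
    prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    decoder = KandinskyV22Pipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")
    image_embeds, negative_image_embeds = prior("red cat, 4k photo").to_tuple()
    image = decoder(
        image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, output_type="pil"
    ).images[0]
    image.save("red_cat.png")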
| 111
| 0
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        '''simple docstring'''
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        '''simple docstring'''
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
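# A minimal sketch of how this trainer is typically wired up in the question-answering
# examples. The extra `eval_examples`/`post_process_function` arguments are what
# distinguish it from the base `Trainer`; the argument names passed in here are
# illustrative assumptions, not defined in this file.
def build_qa_trainer(model, training_args, train_dataset, eval_dataset, eval_examples,
                     post_processing_function, compute_metrics):
    return QuestionAnsweringTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        eval_examples=eval_examples,
        post_process_function=post_processing_function,
        compute_metrics=compute_metrics,
    )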
| 156
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_like(self):
        '''simple docstring'''
        pass
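# A quick, self-contained illustration of the WordPiece behaviour the test above checks:
# greedy longest-match-first splitting against the toy vocabulary written in setUp().
# This sketch uses the `tokenizers` library (which backs LayoutLMTokenizerFast); the
# vocab path is illustrative and assumes setUp() has already written the file.
def demo_wordpiece_split(vocab_path="vocab.txt"):
    from tokenizers import BertWordPieceTokenizer

    tokenizer = BertWordPieceTokenizer(vocab_path, lowercase=True)
    # -> ['un', '##want', '##ed', 'runn', '##ing'] with the toy vocab above
    return tokenizer.encode("unwanted running", add_special_tokens=False).tokens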
| 156
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = 'This is a simple input'
        s2 = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        p2 = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = '$$$'
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = 'This is a simple input'
        s2 = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    def test_padding_different_model_input_name(self):
        # The tokenizer built in setUp() has no padding token, so the generic mixin
        # padding test is skipped here (method name reconstructed, assumed).
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = 'Encode this.'
                sequence_1 = 'This one too please.'
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, )
                encoded_sequence_w_special = encoded_sequence_dict['input_ids']
                special_tokens_mask = encoded_sequence_dict['special_tokens_mask']
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    """simple docstring"""

    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = 'A photo of a cat'
        input_ids = tokenizer.encode(
            text, )
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])
        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(
            text, )
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = 'A photo of a cat'
        input_ids = tokenizer.encode(
            text, )
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = 'bos'
        tokenizer.bos_token_id = tokenizer.get_vocab()['bos']
        text = 'A photo of a cat'
        input_ids = tokenizer.encode(
            text, )
        # We changed the bos token
        self.assertEqual(input_ids, [31_957, 250, 1_345, 9, 10, 4_758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(
            text, )
        self.assertEqual(input_ids, [31_957, 250, 1_345, 9, 10, 4_758])
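# A small runnable sketch of the `add_prefix_space` behaviour exercised above: GPT-2's
# byte-level BPE treats a leading space as part of the first token, so "lower" and
# " lower" tokenize differently. The public "gpt2" checkpoint is used for illustration.
def demo_gpt2_prefix_space():
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    with_space = tokenizer.tokenize("lower newer", add_prefix_space=True)
    without_space = tokenizer.tokenize("lower newer")
    # with_space starts with '\u0120low' (the '\u0120' marks a leading space)
    return with_space, without_space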
| 704
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    """simple docstring"""

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
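# The label-masking step above is the key detail of seq2seq preprocessing: padding
# positions are replaced with -100, the default ignore_index of
# torch.nn.CrossEntropyLoss, so they contribute nothing to the loss.
# A standalone illustration:
def mask_padding_for_loss(label_ids, pad_token_id):
    return [[-100 if token == pad_token_id else token for token in labels] for labels in label_ids]


# mask_padding_for_loss([[5, 7, 0, 0]], pad_token_id=0) -> [[5, 7, -100, -100]]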
| 81
| 0
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    '''simple docstring'''

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
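# Example invocation of the conversion script above (script filename and paths are
# illustrative placeholders):
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./lightning.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa
#
# Equivalently, the function can be called directly from Python:
#
#   convert_longformer_qa_checkpoint_to_pytorch(
#       "longformer-base-4096", "./lightning.ckpt", "./longformer-qa"
#   )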
| 226
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 528
| 0
|
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        '''simple docstring'''
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        '''simple docstring'''
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        '''simple docstring'''
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        '''simple docstring'''
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        '''simple docstring'''
        return self + other * -1

    def __mul__(self, other):
        '''simple docstring'''
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        '''simple docstring'''
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        '''simple docstring'''
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        '''simple docstring'''
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y ** 2 * y ** 4

    print(differentiate(f, 9, 2))
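# Sanity check for differentiate(): f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4
# and f''(9) = 30 * 9**4 = 196_830, which is exactly what the print above produces.
def check_second_derivative():
    def g(y):
        return y ** 2 * y ** 4

    assert differentiate(g, 9, 2) == 30 * 9 ** 4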
| 653
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
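# A minimal programmatic sketch of the same benchmark (argument values are
# illustrative assumptions; any model identifiers and sizes would work):
def run_example_benchmark():
    args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
    )
    return TensorFlowBenchmark(args=args).run()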
| 653
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }), reference_urls=["https://huggingface.co/docs/transformers/perplexity"], )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        '''simple docstring'''
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            # perplexity is e to the masked mean cross-entropy (natural log), per sequence
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 156
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : Optional[int] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
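# A short usage sketch: TrOCR is a decoder-only config, usually paired with a vision
# encoder inside VisionEncoderDecoderModel. The small hyperparameters below are
# illustrative assumptions for building a tiny decoder from this config:
def build_trocr_decoder():
    from transformers import TrOCRForCausalLM

    config = TrOCRConfig(d_model=256, decoder_layers=2, decoder_attention_heads=4, decoder_ffn_dim=512)
    return TrOCRForCausalLM(config)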
| 588
| 0
|
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__lowerCamelCase : Any = set()
# Replace all the whitespace in our sentence
__lowerCamelCase : List[str] = input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowerCamelCase__ ) == 2_6
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
__lowerCamelCase : Any = [False] * 2_6
for char in input_str:
if char.islower():
__lowerCamelCase : Any = True
elif char.isupper():
__lowerCamelCase : Tuple = True
return all(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def SCREAMING_SNAKE_CASE__ ( ) -> None:
from timeit import timeit
__lowerCamelCase : int = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
print(timeit('is_pangram()' , setup=lowerCamelCase__ ) )
print(timeit('is_pangram_faster()' , setup=lowerCamelCase__ ) )
print(timeit('is_pangram_fastest()' , setup=lowerCamelCase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
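# Quick demonstration that the three checks agree on a positive and a negative case:
def demo_pangram_checks():
    sentence = "The quick brown fox jumps over the lazy dog"
    not_pangram = "hello world"
    assert is_pangram(sentence) and is_pangram_faster(sentence) and is_pangram_fastest(sentence)
    assert not (is_pangram(not_pangram) or is_pangram_faster(not_pangram) or is_pangram_fastest(not_pangram))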
| 337
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
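# Hedged usage sketch (the file name is an assumption; in transformers this script
# is conventionally saved as convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py):
#
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-structure-recognition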
| 337
| 1
|
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
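# Optional cross-check (not part of the original contribution): itertools yields
# the same r-sized combinations in the same lexicographic order.
if __name__ == "__main__":
    from itertools import combinations

    combos = list(combinations([10, 20, 30, 40, 50], 3))
    assert len(combos) == 10  # C(5, 3) = 10
    assert combos[0] == (10, 20, 30)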
| 260
|
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: List[str] = CodeGenTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] = CodeGenTokenizerFast
SCREAMING_SNAKE_CASE_: Any = True
SCREAMING_SNAKE_CASE_: str = {"""add_prefix_space""": True}
SCREAMING_SNAKE_CASE_: Any = False
def _UpperCAmelCase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
A__ = dict(zip(__a , range(len(__a ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def _UpperCAmelCase ( self , **__a ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def _UpperCAmelCase ( self , **__a ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
A__ = tokens + [tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer(add_prefix_space=__a )
A__ = 'lower newer'
# Testing tokenization
A__ = tokenizer.tokenize(__a , add_prefix_space=__a )
A__ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
A__ = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
A__ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
A__ = self.get_rust_tokenizer(add_prefix_space=__a )
A__ = tokenizer.encode(__a , add_prefix_space=__a )
A__ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
A__ = tokens + [rust_tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def _UpperCAmelCase ( self , *__a , **__a ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self , __a=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input looooooooong', 'This is a simple input']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
A__ = tokenizer.pad_token_id
A__ = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
A__ = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
A__ = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
A__ = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = '$$$'
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = tokenizer.bos_token_id
A__ = tokenizer(__a )
A__ = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A__ = tokenizer.decode(out_s.input_ids )
A__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
A__ = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
A__ = '\nif len_a > len_b: result = a\nelse: result = b'
A__ = tokenizer.encode(__a )
A__ = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
A__ = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass
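# Hedged sketch (an illustrative re-implementation, NOT the library's actual code)
# of the truncate_before_pattern semantics exercised in the slow test above:
# decoding is cut at the earliest match of any of the given regex patterns.
import re as _re


def _truncate_before(completion, patterns):
    starts = [m.start() for m in (_re.search(p, completion, _re.MULTILINE) for p in patterns) if m]
    return completion[: min(starts)] if starts else completion


if __name__ == "__main__":
    generated = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
    print(repr(_truncate_before(generated, ["^#", _re.escape("<|endoftext|>"), "\n\n\n"])))
    # -> '\nif len_a > len_b:\n    result = a\nelse:\n    result = b'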
| 260
| 1
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
A_ : Optional[Any] = logging.get_logger(__name__)
# General docstring
A_ : Optional[int] = '''RegNetConfig'''
# Base docstring
A_ : Union[str, Any] = '''facebook/regnet-y-040'''
A_ : int = [1, 10_88, 7, 7]
# Image classification docstring
A_ : Any = '''facebook/regnet-y-040'''
A_ : Dict = '''tabby, tabby cat'''
A_ : Optional[Any] = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( nn.Module ):
def __init__( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[str] = "relu" , ) -> str:
"""simple docstring"""
super().__init__()
a = nn.Convad(
__lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=kernel_size // 2 , groups=__lowerCAmelCase , bias=__lowerCAmelCase , )
a = nn.BatchNormad(__lowerCAmelCase )
a = ACTaFN[activation] if activation is not None else nn.Identity()
def A ( self : Tuple , __lowerCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
a = self.convolution(__lowerCAmelCase )
a = self.normalization(__lowerCAmelCase )
a = self.activation(__lowerCAmelCase )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Optional[Any] , __lowerCAmelCase : RegNetConfig ) -> Dict:
"""simple docstring"""
super().__init__()
a = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
a = config.num_channels
def A ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
a = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
a = self.embedder(__lowerCAmelCase )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 2 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
a = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , stride=__lowerCAmelCase , bias=__lowerCAmelCase )
a = nn.BatchNormad(__lowerCAmelCase )
def A ( self : Any , __lowerCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
a = self.convolution(__lowerCAmelCase )
a = self.normalization(__lowerCAmelCase )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
super().__init__()
a = nn.AdaptiveAvgPoolad((1, 1) )
a = nn.Sequential(
nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 ) , nn.Sigmoid() , )
def A ( self : str , __lowerCAmelCase : Any ) -> str:
"""simple docstring"""
a = self.pooler(__lowerCAmelCase )
a = self.attention(__lowerCAmelCase )
a = hidden_state * attention
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Optional[int] , __lowerCAmelCase : RegNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 ) -> Any:
"""simple docstring"""
super().__init__()
a = in_channels != out_channels or stride != 1
a = max(1 , out_channels // config.groups_width )
a = (
RegNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
a = nn.Sequential(
RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , groups=__lowerCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase ) , )
a = ACTaFN[config.hidden_act]
def A ( self : List[str] , __lowerCAmelCase : List[Any] ) -> int:
"""simple docstring"""
a = hidden_state
a = self.layer(__lowerCAmelCase )
a = self.shortcut(__lowerCAmelCase )
hidden_state += residual
a = self.activation(__lowerCAmelCase )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Optional[Any] , __lowerCAmelCase : RegNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
a = in_channels != out_channels or stride != 1
a = max(1 , out_channels // config.groups_width )
a = (
RegNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
a = nn.Sequential(
RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , groups=__lowerCAmelCase , activation=config.hidden_act ) , RegNetSELayer(__lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase ) , )
a = ACTaFN[config.hidden_act]
def A ( self : Tuple , __lowerCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
a = hidden_state
a = self.layer(__lowerCAmelCase )
a = self.shortcut(__lowerCAmelCase )
hidden_state += residual
a = self.activation(__lowerCAmelCase )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Any , __lowerCAmelCase : RegNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , ) -> int:
"""simple docstring"""
super().__init__()
a = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
a = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , ) , *[layer(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for _ in range(depth - 1 )] , )
def A ( self : Tuple , __lowerCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
a = self.layers(__lowerCAmelCase )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Tuple , __lowerCAmelCase : RegNetConfig ) -> str:
"""simple docstring"""
super().__init__()
a = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
a = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__lowerCAmelCase , config.depths[1:] ):
self.stages.append(RegNetStage(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , depth=__lowerCAmelCase ) )
def A ( self : int , __lowerCAmelCase : Tensor , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
a = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a = hidden_states + (hidden_state,)
a = stage_module(__lowerCAmelCase )
if output_hidden_states:
a = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase )
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = RegNetConfig
_UpperCAmelCase = '''regnet'''
_UpperCAmelCase = '''pixel_values'''
_UpperCAmelCase = True
def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
if isinstance(__lowerCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(__lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=False ) -> Tuple:
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
a = value
A_ : Dict = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
A_ : str = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', UpperCAmelCase__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _lowercase ( UpperCAmelCase__ ):
def __init__( self : List[Any] , __lowerCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__(__lowerCAmelCase )
a = config
a = RegNetEmbeddings(__lowerCAmelCase )
a = RegNetEncoder(__lowerCAmelCase )
a = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Tuple , __lowerCAmelCase : Tensor , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a = return_dict if return_dict is not None else self.config.use_return_dict
a = self.embedder(__lowerCAmelCase )
a = self.encoder(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
a = encoder_outputs[0]
a = self.pooler(__lowerCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', UpperCAmelCase__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _lowercase ( UpperCAmelCase__ ):
def __init__( self : Tuple , __lowerCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(__lowerCAmelCase )
a = config.num_labels
a = RegNetModel(__lowerCAmelCase )
# classification head
a = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : List[str] , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[torch.LongTensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
a = return_dict if return_dict is not None else self.config.use_return_dict
a = self.regnet(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
a = outputs.pooler_output if return_dict else outputs[1]
a = self.classifier(__lowerCAmelCase )
a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a = "single_label_classification"
else:
a = "multi_label_classification"
if self.config.problem_type == "regression":
a = MSELoss()
if self.num_labels == 1:
a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
a = CrossEntropyLoss()
a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a = BCEWithLogitsLoss()
a = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
a = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states )
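# Hedged usage sketch (not part of the modeling file; the image path is a
# placeholder for a local file):
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image = Image.open("cat.png").convert("RGB")  # hypothetical local file
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"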
| 720
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
def A ( self : Union[str, Any] ) -> int:
"""simple docstring"""
a = [[1, 2, 4], [1, 2, 3, 4]]
a = DisjunctiveConstraint(__lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def A ( self : Tuple ) -> Dict:
"""simple docstring"""
a = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint(__lowerCAmelCase ) # fails here
def A ( self : int ) -> Any:
"""simple docstring"""
a = [[1, 2, 3], [1, 2, 4]]
a = DisjunctiveConstraint(__lowerCAmelCase )
a , a , a = dc.update(1 )
a = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a , a , a = dc.update(2 )
a = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a = dc.update(3 )
a = stepped is True and completed is True and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
a = DisjunctiveConstraint(__lowerCAmelCase )
a , a , a = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a , a , a = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
a , a , a = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
a , a , a = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
a , a , a = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
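# Hedged sketch of how the class under test is consumed by constrained beam search
# (the model and input_ids are assumptions, not provided by this test file):
#
#     constraint = DisjunctiveConstraint([[1, 2, 4], [1, 2, 3, 4]])  # one path must appear
#     outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)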
| 32
| 0
|
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
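# Hedged driver (not in the original snippet; it uses the class and method names
# defined above, and a common textbook matrix). With 8-directional connectivity
# this grid contains 5 islands.
if __name__ == "__main__":
    matrix = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    g = Graph(5, 5, matrix)
    print("Number of islands:", g.count_islands())  # 5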
| 241
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # A word counts as Chinese only if every character is a CJK codepoint.
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: list):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: list, ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
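# Hedged usage sketch (the file name is an assumption; paths match the argparse
# defaults above, and the LTP/BERT resources must be downloaded separately):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt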
| 241
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , a : Any , a : int=2 , a : Dict=True , a : List[str]=False , a : str=10 , a : Optional[int]=3 , a : Union[str, Any]=32 * 8 , a : List[Any]=32 * 8 , a : int=4 , a : Any=64 , ):
'''simple docstring'''
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : int = is_training
lowerCAmelCase__ : Dict = use_auxiliary_loss
lowerCAmelCase__ : str = num_queries
lowerCAmelCase__ : Union[str, Any] = num_channels
lowerCAmelCase__ : Tuple = min_size
lowerCAmelCase__ : int = max_size
lowerCAmelCase__ : Optional[int] = num_labels
lowerCAmelCase__ : Any = hidden_dim
lowerCAmelCase__ : Tuple = hidden_dim
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE ) > 0.5
).float()
lowerCAmelCase__ : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=_SCREAMING_SNAKE_CASE ) > 0.5).long()
lowerCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase__ : Optional[Any] = self.num_queries
lowerCAmelCase__ : Any = self.num_labels
lowerCAmelCase__ : int = [1, 1, 1, 1]
lowerCAmelCase__ : Dict = self.num_channels
lowerCAmelCase__ : Dict = 64
lowerCAmelCase__ : int = 128
lowerCAmelCase__ : Optional[Any] = self.hidden_dim
lowerCAmelCase__ : Tuple = self.hidden_dim
lowerCAmelCase__ : str = self.hidden_dim
return config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _lowerCamelCase ( self : Optional[int] , a : Optional[int] , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = output.encoder_hidden_states
lowerCAmelCase__ : List[Any] = output.pixel_decoder_hidden_states
lowerCAmelCase__ : Optional[int] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , config.decoder_layers )
def _lowerCamelCase ( self : Dict , a : Union[str, Any] , a : List[Any] , a : int , a : int=False ):
'''simple docstring'''
with torch.no_grad():
lowerCAmelCase__ : List[str] = MaskaFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : List[str] = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = model(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self : Optional[Any] , a : int , a : Optional[Any] , a : Union[str, Any] , a : str , a : str ):
'''simple docstring'''
lowerCAmelCase__ : int = MaskaFormerForUniversalSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(a : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase__ : str = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = model(_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = model(
pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowercase = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Dict = MaskaFormerModelTester(self )
lowerCAmelCase__ : List[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : int = [*signature.parameters.keys()]
lowerCAmelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase__ : List[str] = MaskaFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = (self.model_tester.min_size,) * 2
lowerCAmelCase__ : List[str] = {
'pixel_values': torch.randn((2, 3, *size) , device=_SCREAMING_SNAKE_CASE ),
'mask_labels': torch.randn((2, 10, *size) , device=_SCREAMING_SNAKE_CASE ),
'class_labels': torch.zeros(2 , 10 , device=_SCREAMING_SNAKE_CASE ).long(),
}
lowerCAmelCase__ : Optional[Any] = self.model_tester.get_config()
lowerCAmelCase__ : Dict = MaskaFormerForUniversalSegmentation(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = model(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCAmelCase__ : List[Any] = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Tuple = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
lowerCAmelCase__ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : int = True
lowerCAmelCase__ : List[str] = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
model.train()
lowerCAmelCase__ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase__ : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase__ : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase__ : str = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1E-4
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class A__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = self.default_image_processor
lowerCAmelCase__ : List[Any] = prepare_img()
lowerCAmelCase__ : Tuple = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase__ : Any = model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : Any = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : Dict = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase__ : Tuple = self.default_image_processor
lowerCAmelCase__ : Optional[Any] = prepare_img()
lowerCAmelCase__ : Optional[int] = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
lowerCAmelCase__ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase__ : List[Any] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
lowerCAmelCase__ : Any = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
lowerCAmelCase__ : Optional[int] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase__ : Any = self.default_image_processor
lowerCAmelCase__ : Optional[Any] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
lowerCAmelCase__ : str = inputs['pixel_values'].to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['mask_labels']]
lowerCAmelCase__ : int = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['class_labels']]
with torch.no_grad():
lowerCAmelCase__ : str = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
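# Hedged inference sketch (not part of the test suite). Note: the canonical class
# names in transformers are spelled Mask2Former...; the image path is a placeholder.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

    ckpt = "facebook/mask2former-swin-small-coco-instance"
    processor = Mask2FormerImageProcessor.from_pretrained(ckpt)
    model = Mask2FormerForUniversalSegmentation.from_pretrained(ckpt).eval()
    image = Image.open("street.jpg").convert("RGB")  # hypothetical local file
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    result = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(result["segmentation"].shape)  # instance map at the original image size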
| 713
|
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
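    # Quick numerical sanity check (illustrative, not from the original file):
    # the density should integrate to ~1 over a wide interval.
    import numpy as np

    xs = np.linspace(-10.0, 10.0, 100_001)
    print(float(np.trapz(gaussian(xs), xs)))  # ~1.0 for mu=0, sigma=1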
| 69
| 0
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event=None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("Example of four vector: ")
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
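    # Hedged sanity check (added illustration, not in the original driver): a boost
    # must preserve the Minkowski interval ct^2 - x^2 - y^2 - z^2.
    event = np.array([4.0, 3.0, 2.0, 1.0])
    boosted = transformation_matrix(0.5 * c) @ event
    print(event[0] ** 2 - np.sum(event[1:] ** 2))      # 2.0
    print(boosted[0] ** 2 - np.sum(boosted[1:] ** 2))  # ~2.0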
| 66
|
'''simple docstring'''
def sum_digits(num):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Iteratively build the numerators of the continued-fraction convergents of e
    # (Project Euler 65): the partial denominators are 1, 1, 2, 1, 1, 4, 1, 1, 6, ...
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'''{solution() = }''')
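    # Worked check (illustrative): the 10th convergent of e is 1457/536, so
    # solution(10) should give 1 + 4 + 5 + 7 = 17.
    print(f"{solution(10) = }")  # solution(10) = 17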
| 531
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # An XLNet sequence has the format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
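
# --- Usage sketch (illustrative; assumes the `xlnet-base-cased` checkpoint is reachable) ---
# tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# encoding = tokenizer("Hello world")
# XLNet appends its special tokens (<sep>, <cls>) at the END of the sequence and
# pads on the LEFT, unlike most BERT-style tokenizers.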
| 712
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ..pipeline_utils import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E pipelines: a batch of rendered frames per prompt."""

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        """Device on which the pipeline's models will run, honoring accelerate hooks."""
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape:
        # batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__UpperCamelCase )
__lowerCAmelCase = []
for i, latent in enumerate(__UpperCamelCase ):
print()
__lowerCAmelCase = self.renderer.decode(
latent[None, :] , __UpperCamelCase , size=__UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(__UpperCamelCase )
__lowerCAmelCase = torch.stack(__UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
__lowerCAmelCase = images.cpu().numpy()
if output_type == "pil":
__lowerCAmelCase = [self.numpy_to_pil(__UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__UpperCamelCase )
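
# --- Minimal driver sketch (illustrative; mirrors EXAMPLE_DOC_STRING above) ---
# pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
# frames = pipe(input_image, num_inference_steps=64, frame_size=256).images[0]
# `input_image` is a placeholder PIL image; pass output_type="latent" to skip rendering.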
| 290
| 0
|
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
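
# --- Sketch of the custom-timesteps API exercised above (assumes diffusers is installed) ---
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # strictly descending, ending at 0
# scheduler.previous_timestep(t) then walks this list, returning -1 after the final entry.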
| 384
|
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
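
# --- Usage sketch (illustrative; the checkpoint name is one public example) ---
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(document_image, return_tensors="pt")  # OCR runs inside the image processor
# The result keeps "input_ids", "bbox", "attention_mask" and "pixel_values" aligned.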
| 334
| 0
|
"""Guess a number within given bounds using a bisection search."""


def min_or_max_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` when ``option`` is True, otherwise ``max_val``."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Bisect the interval (lower, higher) until ``to_guess`` is found, printing each probe."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
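
# --- Non-interactive example sketch (calls guess_the_number directly) ---
# guess_the_number(10, 1000, 17) prints the bisection trace and ends with
# "guess the number : 17"; the first probe is get_avg(10, 1000) == 505.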
| 713
|
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet).
GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
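
# --- Invocation sketch (flags are the script's own; file paths are placeholders,
# and the script filename is assumed to be run_language_modeling.py) ---
# python run_language_modeling.py \
#     --model_name_or_path roberta-base --mlm \
#     --train_data_file train.txt --eval_data_file eval.txt \
#     --do_train --do_eval --output_dir ./lm-output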
| 56
| 0
|
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 418
|
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    # class name reconstructed from the denoising comments below (the original was obfuscated)
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # sample gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
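
# --- Usage sketch (illustrative; the checkpoint is one public example) ---
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]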
| 559
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
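
# --- Usage sketch (illustrative; `audio` is a placeholder waveform the processor accepts) ---
# tool = SpeechToTextTool()
# transcript = tool(audio)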
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336
| 0
|
"""Prepare TFRecord shards from a pre-tokenized Hugging Face dataset (wikitext by default)."""
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument("--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.")
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument("--limit", default=None, type=int, help="Limit the number of shards (used for debugging).")
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
UpperCAmelCase_ : Dict = parse_args()
main(args)
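
# --- Invocation sketch (defaults shown are the script's own; the filename and
# bucket name are placeholders) ---
# python prepare_tfrecord_shards.py --split train --shard_size 1000 --max_length 512 \
#     --output_dir gs://my-bucket/tf-tpu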
| 24
|
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask (mask : Union[List, PIL.Image.Image, torch.Tensor] ) -> torch.Tensor:
    '''simple docstring'''
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline ( DiffusionPipeline):
    unet : UNet2DModel
    scheduler : RePaintScheduler
    def __init__( self , unet , scheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image , mask_image , num_inference_steps = 250 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
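# Illustrative usage sketch for the pipeline above (not part of the original
# file). The checkpoint id and file names are assumptions for demonstration
# only; mask semantics follow the RePaint scheduler.
#
# pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
# original = PIL.Image.open("celeba_256.png")   # hypothetical 256x256 image
# mask = PIL.Image.open("mask_256.png")         # hypothetical binary mask
# output = pipe(image=original, mask_image=mask, num_inference_steps=250,
#               eta=0.0, jump_length=10, jump_n_sample=10)
# output.images[0].save("inpainted.png")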
| 24
| 1
|
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class SCREAMING_SNAKE_CASE ( HashTable ):
    def __init__( self : str , *args : Dict , **kwargs : Union[str, Any] ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
    def _set_value( self : Any , key : List[Any] , data : Union[str, Any] ):
        """simple docstring"""
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self : Union[str, Any] ):
        """simple docstring"""
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self : List[Any] , key : Any , data : Union[str, Any]=None ):
        """simple docstring"""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
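# Minimal usage sketch (not from the original module) of the separate-chaining
# idea the class above implements: each table slot holds a deque, and new
# values are pushed to the front of their bucket. `deque` is already imported
# at the top of this file.
def _demo_chaining(size=4):
    buckets = [deque() for _ in range(size)]
    for value in (3, 7, 11):  # all three hash to slot 3 when size == 4
        buckets[value % size].appendleft(value)
    assert list(buckets[3]) == [11, 7, 3]  # most recent insertion sits first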
| 483
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--src_path""", type=str, default="""biencoder-nq-dev.json""", help="""Path to raw DPR training data""", )
    parser.add_argument(
        """--evaluation_set""", type=str, help="""where to store parsed evaluation_set file""", )
    parser.add_argument(
        """--gold_data_path""", type=str, help="""where to store parsed gold_data_path file""", )
    args = parser.parse_args()
    with open(args.src_path, """r""" ) as src_file, open(args.evaluation_set, """w""" ) as eval_file, open(
        args.gold_data_path, """w""" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["""question"""]
            contexts = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
            eval_file.write(question + """\n""" )
            gold_file.write("""\t""".join(contexts ) + """\n""" )
if __name__ == "__main__":
main()
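# Illustrative sketch (not part of the original script) of the record shape it
# consumes and the lines it emits; the field values below are made up.
#
# One entry of biencoder-nq-dev.json looks like:
#   {"question": "who wrote hamlet",
#    "positive_ctxs": [{"title": "Hamlet", "text": "..."},
#                      {"title": "William Shakespeare", "text": "..."}]}
# which yields one line per output file:
#   evaluation_set: "who wrote hamlet"
#   gold_data_path: "Hamlet\tWilliam Shakespeare"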
| 483
| 1
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger =logging.getLogger()
def _dump_articles( path : Path , articles : list ) -> None:
    """simple docstring"""
    content = "\n".join(articles )
    Path(path ).open("""w""").writelines(content )
T5_TINY ="""patrickvonplaten/t5-tiny-random"""
BART_TINY ="""sshleifer/bart-tiny-random"""
MBART_TINY ="""sshleifer/tiny-mbart"""
stream_handler =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
    def run_eval_tester( self , model ) -> List[Any]:
"""simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
        with patch.object(sys , """argv""" , testargs ):
run_generate()
assert Path(__lowercase ).exists()
# os.remove(Path(output_file_name))
    def test_run_eval( self ) -> List[Any]:
        """simple docstring"""
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow( self , model ) -> Optional[int]:
        """simple docstring"""
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search( self , model ) -> Union[str, Any]:
"""simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / """scores.json""" )
        reference_path = str(tmp_dir / """val.target""" )
        _dump_articles(input_file_name , text["""en"""] )
        _dump_articles(reference_path , text["""de"""] )
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = F'''
run_eval_search.py
{model}
            {str(input_file_name )}
            {str(output_file_name )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
        with patch.object(sys , """argv""" , testargs ):
with CaptureStdout() as cs:
run_search()
a__ : List[str] = [" num_beams | length_penalty", model, "Best score args"]
a__ : List[str] = ["Info"]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(__lowercase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
        assert Path(output_file_name ).exists()
        os.remove(Path(output_file_name ) )
| 136
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Tuple:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        """simple docstring"""
        model = TFDebertaVaModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        """simple docstring"""
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        """simple docstring"""
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[str] = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = False
    def setUp( self ) -> Dict:
        """simple docstring"""
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
    def test_config( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ) -> List[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ) -> List[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ) -> List[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> List[str]:
        """simple docstring"""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        self.assertIsNotNone(model )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _snake_case ( self ) -> Dict:
"""simple docstring"""
pass
@slow
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : Optional[Any] = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
a__ : Any = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
a__ : List[str] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
a__ : Tuple = model(snake_case , attention_mask=snake_case )[0]
a__ : List[Any] = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , snake_case , atol=1E-4 )
| 112
| 0
|
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = CodeGenTokenizer
_UpperCAmelCase = CodeGenTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = {"add_prefix_space": True}
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text ,add_prefix_space=True )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence ,add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens ,rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence ,add_special_tokens=False ,add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence ,add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids ,rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
    def test_pretokenized_inputs( self ,*args ,**kwargs ):
        '''simple docstring'''
        pass
    def test_padding( self ,max_length=15 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                # Simple input
                s = 'This is a simple input'
                sa = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                pa = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError ,tokenizer_r.encode ,s ,max_length=max_length ,padding='max_length' )
                # Simple input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,s ,max_length=max_length ,padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,sa ,max_length=max_length ,padding='max_length' ,)
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode ,p ,max_length=max_length ,padding='max_length' )
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,p ,max_length=max_length ,padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,pa ,max_length=max_length ,padding='max_length' ,)
    def test_padding_if_pad_token_set_slow( self ):
        '''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token='<pad>' )
        # Simple input
        s = 'This is a simple input'
        sa = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        pa = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s ,padding='max_length' ,max_length=30 ,return_tensors='np' )
        out_sa = tokenizer(sa ,padding=True ,truncate=True ,return_tensors='np' )
        out_p = tokenizer(*p ,padding='max_length' ,max_length=60 ,return_tensors='np' )
        out_pa = tokenizer(pa ,padding=True ,truncate=True ,return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
    def test_add_bos_token_slow( self ):
        '''simple docstring'''
        bos_token = '$$$'
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=bos_token ,add_bos_token=True )
        s = 'This is a simple input'
        sa = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_sa = tokenizer(sa )
        self.assertEqual(out_s.input_ids[0] ,bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] ,bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
    def test_truncation( self ):
        '''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
        text = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
        expected_truncated_text = '\nif len_a > len_b: result = a\nelse: result = b'
        input_ids = tokenizer.encode(text )
        truncate_before_pattern = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
        decoded_text = tokenizer.decode(input_ids ,truncate_before_pattern=truncate_before_pattern )
        self.assertEqual(decoded_text ,expected_truncated_text )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
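# Minimal sketch (not the tokenizer's real implementation) of the
# truncate_before_pattern behaviour exercised in the slow test above: cut the
# decoded completion at the earliest match of any stop pattern. `re` is
# already imported at the top of this file.
def _truncate_before(text, patterns):
    cut = len(text)
    for pattern in patterns:
        match = re.search(pattern, text, flags=re.MULTILINE)
        if match:
            cut = min(cut, match.start())
    return text[:cut]

assert _truncate_before("result = b\n# trailing comment", ["^#"]) == "result = b\n"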
| 16
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.decoder )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe( self ,token ):
        '''simple docstring'''
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES ,'' )
        word = word.replace(' ' ,BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
    def _tokenize( self ,text ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )
return split_tokens
    def _convert_token_to_id( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self ,_A ):
        '''simple docstring'''
        result = self.decoder.get(_A ,self.unk_token )
        return result
    def convert_tokens_to_string( self ,_A ):
        '''simple docstring'''
        string = ' '.join(_A )
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB ) )
        return string
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file ,'w' ,encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + '\n' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return (vocab_file, merges_file)
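# Standalone sketch (not from the original file) of one round of the BPE merge
# loop implemented in bpe() above: among all adjacent symbol pairs, merge the
# pair with the lowest merge rank. The toy ranks below are assumptions.
def _bpe_merge_once(word, ranks):
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
    if best not in ranks:
        return word  # nothing left to merge
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == best:
            merged.append(word[i] + word[i + 1])
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)

assert _bpe_merge_once(("l", "o", "w"), {("l", "o"): 0}) == ("lo", "w")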
| 16
| 1
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path : str ):
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
def is_remote_filesystem( fs : fsspec.AbstractFileSystem ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename( fs : fsspec.AbstractFileSystem , src : str , dst : str ):
    is_local =not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock( ):
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
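# Usage sketch (not part of the original module): extract_path_from_uri strips
# the protocol from a remote URI and leaves local paths untouched.
#
# extract_path_from_uri("s3://my-bucket/data/train")  # -> "my-bucket/data/train"
# extract_path_from_uri("relative/local/path")        # -> "relative/local/path"
# is_remote_filesystem(fsspec.filesystem("file"))     # -> False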
| 20
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_A: Any = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 126
| 0
|
from ...processing_utils import ProcessorMixin
class a ( __lowerCamelCase ):
__lowerCAmelCase : Optional[Any] = """SpeechT5FeatureExtractor"""
__lowerCAmelCase : Tuple = """SpeechT5Tokenizer"""
    def __init__( self ,feature_extractor ,tokenizer ):
        super().__init__(feature_extractor ,tokenizer )
    def __call__( self ,*args ,**kwargs ):
        audio = kwargs.pop('''audio''' ,None )
        text = kwargs.pop('''text''' ,None )
        text_target = kwargs.pop('''text_target''' ,None )
        audio_target = kwargs.pop('''audio_target''' ,None )
        sampling_rate = kwargs.pop('''sampling_rate''' ,None )
        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio ,*args ,sampling_rate=sampling_rate ,**kwargs )
        elif text is not None:
            inputs = self.tokenizer(text ,**kwargs )
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target ,*args ,sampling_rate=sampling_rate ,**kwargs )
            labels = targets['''input_values''']
        elif text_target is not None:
            targets = self.tokenizer(text_target ,**kwargs )
            labels = targets['''input_ids''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def pad( self ,*args ,**kwargs ):
        input_values = kwargs.pop('''input_values''' ,None )
        input_ids = kwargs.pop('''input_ids''' ,None )
        labels = kwargs.pop('''labels''' ,None )
        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values ,*args ,**kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids ,**kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels ,list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels ,**kwargs )
                labels = targets['''input_ids''']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels ,*args ,**kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['''input_values''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )
| 219
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher( args ):
    """simple docstring"""
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"""Running {' '.join(__lowerCAmelCase )}""" )
return
    subprocess.run(cmd )
print('''Successfully setup pod.''' )
def main() -> None:
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
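# Illustrative sketch (not part of the original file): with hypothetical
# arguments --tpu_name my-tpu --tpu_zone us-central1-a --command "echo hi",
# the launcher above assembles and runs a command along the lines of:
#
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; echo hi" --worker all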
| 219
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class __A ( lowerCamelCase__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = """lilt"""
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,classifier_dropout=None ,channel_shrink_ratio=4 ,max_2d_position_embeddings=1_024 ,**kwargs ,) -> Dict:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 560
|
from __future__ import annotations
import numpy as np
def relu( lowercase ) -> np.ndarray:
    """simple docstring"""
    return np.maximum(0 , lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 458
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
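# Minimal sketch (not part of the original file) of the optional-dependency
# pattern used above: each backend is probed with try/except, and only the
# symbols the installed environment can support are added to the lazy import
# structure. The module and symbol names below are hypothetical.
#
# _import_structure = {"configuration_foo": ["FooConfig"]}
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     pass  # torch missing: FooModel is simply not exposed
# else:
#     _import_structure["modeling_foo"] = ["FooModel"]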
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 466
| 0
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left, right, array, target ):
    for i in range(left, right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array, target ):
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search( left, right, array, target ):
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target )
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(F"""Iterative search: {target} found at position: {result_ite}""")
        print(F"""Recursive search: {target} found at position: {result_rec}""")
    else:
        print('Not found')
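# Quick non-interactive check (not part of the original script): on a short
# sorted list both entry points above agree. With precision = 10 and fewer
# than ten elements between the bounds, the fallback linear scan does the work.
#
# _demo = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
# ite_ternary_search(_demo, 5)                      # -> 2
# rec_ternary_search(0, len(_demo) - 1, _demo, 5)   # -> 2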
| 101
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Optional[Any] = self.get_dummy_components()
a_ : Any = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : Tuple = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : List[Any] = image[0, -3:, -3:, -1]
a_ : int = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = self.get_dummy_components(class_cond=lowerCAmelCase_ )
a_ : Tuple = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : str = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Dict = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : str = 0
a_ : Union[str, Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : int = image[0, -3:, -3:, -1]
a_ : str = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : List[Any] = self.get_dummy_components()
a_ : Optional[int] = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : List[str] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Any = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : List[Any] = 1
a_ : int = None
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : List[Any] = image[0, -3:, -3:, -1]
a_ : List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : str = self.get_dummy_components(class_cond=lowerCAmelCase_ )
a_ : Optional[Any] = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : Union[str, Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : int = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : Any = 1
a_ : Optional[int] = None
a_ : Optional[Any] = 0
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : str = image[0, -3:, -3:, -1]
a_ : int = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : List[str] = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Union[str, Any] = self.get_inputs()
a_ : List[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Dict = image[0, -3:, -3:, -1]
a_ : int = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : List[Any] = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Tuple = self.get_inputs()
a_ : Optional[Any] = 1
a_ : List[str] = None
a_ : Optional[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Any = image[0, -3:, -3:, -1]
a_ : Any = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : Tuple = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : str = self.get_inputs(get_fixed_latents=lowerCAmelCase_ , device=lowerCAmelCase_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase_ , enable_math=lowerCAmelCase_ , enable_mem_efficient=lowerCAmelCase_ ):
a_ : int = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : int = image[0, -3:, -3:, -1]
a_ : Optional[int] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : Dict = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : List[str] = self.get_inputs(get_fixed_latents=lowerCAmelCase_ , device=lowerCAmelCase_ )
a_ : str = 1
a_ : Dict = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase_ , enable_math=lowerCAmelCase_ , enable_mem_efficient=lowerCAmelCase_ ):
a_ : Optional[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Optional[int] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
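# The tests above all share one assertion idiom: compare a small corner slice of
# the generated image against hard-coded reference values. A self-contained
# sketch of that idiom (the arrays here are synthetic, not real pipeline outputs,
# and the helper name `_slice_close` is illustrative only):
import numpy as np


def _slice_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-3) -> bool:
    # image is (batch, height, width, channels); take the last 3x3 patch of the
    # last channel of the first sample, exactly as the tests do.
    image_slice = image[0, -3:, -3:, -1]
    return np.abs(image_slice.flatten() - expected_slice).max() < atol


fake_image = np.full((1, 32, 32, 3), 0.5, dtype=np.float32)
assert _slice_close(fake_image, np.full(9, 0.5, dtype=np.float32))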
| 577
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "big_bird"

    def __init__( self , vocab_size=50358 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=4096 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=66 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=64 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
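# Sketch of what the OnnxConfig `inputs` property above encodes: per-input
# dynamic axes, keyed by axis index. Standalone illustration only; the helper
# name `onnx_dynamic_axes` is hypothetical, not a transformers API.
from collections import OrderedDict


def onnx_dynamic_axes(task: str):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])


assert onnx_dynamic_axes("default")["input_ids"] == {0: "batch", 1: "sequence"}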
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ =logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
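# The backbone's final channel width above is derived, not configured: each Swin
# stage doubles the embedding dimension. With the defaults (embed_dim=96,
# depths=[2, 2, 6, 2]) that gives 96 * 2**3 = 768. Quick standalone check:
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768
assert ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] == [
    "stem", "stage1", "stage2", "stage3", "stage4"
]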
| 33
| 0
|
import logging
from transformers import PretrainedConfig
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "bertabs"

    def __init__( self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs, ) -> None:
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 625
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
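# The `_LazyModule` wiring above defers heavy imports until an attribute is
# first touched. A minimal standalone sketch of the same idea using only the
# stdlib; the names here are illustrative, not the transformers implementation:
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # Only called when normal lookup fails, i.e. the first access.
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later accesses skip __getattr__
        return value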
| 32
| 0
|
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
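# Cross-check of `solution` against a direct one-liner (assumes the restored
# name `solution` above); 2**15 = 32768, digit sum 3+2+7+6+8 = 26.
assert solution(15) == 26 == sum(int(d) for d in str(2**15))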
| 80
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
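# The up-blocks above consume the down-block residuals in reverse order and
# concatenate them onto the hidden states along the channel axis before each
# resnet. A standalone jnp sketch of that skip-connection bookkeeping (the
# shapes are illustrative, not tied to any particular UNet):
import jax.numpy as jnp

hidden = jnp.zeros((1, 8, 8, 320))                    # NHWC layout, as in Flax
res_stack = (jnp.zeros((1, 8, 8, 320)), jnp.zeros((1, 8, 8, 640)))

res = res_stack[-1]                                   # pop the most recent residual
res_stack = res_stack[:-1]
hidden = jnp.concatenate((hidden, res), axis=-1)      # channels: 320 + 640
assert hidden.shape == (1, 8, 8, 960)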
| 80
| 1
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
def __call__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , _SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **_SCREAMING_SNAKE_CASE : Optional[int] , ):
"""simple docstring"""
UpperCamelCase = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# add pixel_values + pixel_mask
UpperCamelCase = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
encoding.update(_SCREAMING_SNAKE_CASE )
return encoding
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *_SCREAMING_SNAKE_CASE : Dict , **_SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : int , *_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor
| 280
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
def rename_key(key):
    """Collapse `name.0`-style indexed segments into `name_0` so PT keys match Flax."""
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.')))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert the pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
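# The two reshapes the converter above performs most often, shown standalone
# with numpy: PyTorch conv kernels are (out, in, kh, kw) while Flax expects
# (kh, kw, in, out), and PyTorch linear weights are the transpose of Flax kernels.
import numpy as np

pt_conv = np.zeros((64, 32, 3, 3), dtype=np.float32)
flax_conv = pt_conv.transpose(2, 3, 1, 0)
assert flax_conv.shape == (3, 3, 32, 64)

pt_linear = np.zeros((128, 256), dtype=np.float32)     # (out_features, in_features)
assert pt_linear.T.shape == (256, 128)                 # Flax kernel layout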
| 280
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default='image-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'image': Image()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features) -> "ImageClassification":
        if self.label_column not in features:
            raise ValueError(F"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so bypass __setattr__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 522
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
__a : Optional[Any] = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
    args = parser.parse_args()

    logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`

    logger.info(F"Loading text from {args.file_path}" )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()

    logger.info('Start encoding' )
    logger.info(F"{len(data )} examples to process." )

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = F"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(F"{len(data )} examples processed." )

    dp_file = F"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F"Dump to {dp_file}" )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
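# The dtype choice above keeps the pickled dump small: token ids fit in uint16
# only when the vocabulary has fewer than 2**16 entries. A standalone sketch of
# that decision (the helper name `pack_ids` is mine, not part of the script):
import numpy as np


def pack_ids(token_ids, vocab_size):
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return np.asarray(token_ids, dtype=dtype)


assert pack_ids([101, 2054, 102], vocab_size=30522).dtype == np.uint16
assert pack_ids([101, 2054, 102], vocab_size=250000).dtype == np.int32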
| 522
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
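# The per-utterance normalization used above (mean and variance computed over
# the valid frames only, padding positions reset afterwards) in a standalone
# numpy form. Note: the epsilon in the division is my addition for numerical
# safety; the extractor above divides by the raw standard deviation.
import numpy as np


def cmvn(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    x = x.copy()
    x[:input_length] = x[:input_length] - x[:input_length].mean(axis=0)
    x[:input_length] = x[:input_length] / (x[:input_length].std(axis=0) + 1e-10)
    x[input_length:] = padding_value
    return x.astype(np.float32)


feats = np.random.default_rng(0).normal(size=(100, 80))
norm = cmvn(feats, input_length=80)
assert abs(norm[:80].mean()) < 1e-6       # valid frames are zero-mean per feature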
| 44
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""")
        print(*incompatible_ops, sep='\n')
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=1_2, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
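# The core of the compatibility check above is just a set difference between
# the ops a graph uses and the ops ONNX (plus the internal allow-list) knows
# about. Standalone illustration with made-up op names:
model_ops = {"MatMul", "Relu", "SaveV2", "MyCustomOp"}
supported = {"MatMul", "Relu"} | {"SaveV2"}  # ONNX ops for the opset + INTERNAL_OPS
incompatible = sorted(model_ops - supported)
assert incompatible == ["MyCustomOp"]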
| 545
| 0
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    '''simple docstring'''

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self) -> None:
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )

        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 123
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 123
| 1
|
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase: int, _lowerCamelCase: int ) -> int:
'''simple docstring'''
return number | (1 << position)
def __magic_name__ ( _lowerCamelCase: int, _lowerCamelCase: int ) -> int:
'''simple docstring'''
return number & ~(1 << position)
def __magic_name__ ( _lowerCamelCase: int, _lowerCamelCase: int ) -> int:
'''simple docstring'''
return number ^ (1 << position)
def __magic_name__ ( _lowerCamelCase: int, _lowerCamelCase: int ) -> bool:
'''simple docstring'''
return ((number >> position) & 1) == 1
def __magic_name__ ( _lowerCamelCase: int, _lowerCamelCase: int ) -> int:
'''simple docstring'''
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
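# Worked example for the restored helpers above, on 0b1101 (13):
assert set_bit(13, 1) == 15       # 0b1101 -> 0b1111
assert clear_bit(13, 2) == 9      # 0b1101 -> 0b1001
assert flip_bit(13, 0) == 12      # 0b1101 -> 0b1100
assert is_bit_set(13, 3) is True
assert get_bit(13, 1) == 0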
| 535
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self, image_processor, tokenizer) -> None:
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
def __call__(self : int ,SCREAMING_SNAKE_CASE_ : ImageInput = None ,SCREAMING_SNAKE_CASE_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,SCREAMING_SNAKE_CASE_ : bool = True ,SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False ,SCREAMING_SNAKE_CASE_ : Union[bool, str, TruncationStrategy] = None ,SCREAMING_SNAKE_CASE_ : Optional[int] = None ,SCREAMING_SNAKE_CASE_ : int = 0 ,SCREAMING_SNAKE_CASE_ : Optional[int] = None ,SCREAMING_SNAKE_CASE_ : Optional[bool] = None ,SCREAMING_SNAKE_CASE_ : bool = False ,SCREAMING_SNAKE_CASE_ : bool = False ,SCREAMING_SNAKE_CASE_ : bool = False ,SCREAMING_SNAKE_CASE_ : bool = False ,SCREAMING_SNAKE_CASE_ : bool = False ,SCREAMING_SNAKE_CASE_ : bool = True ,SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE_ : str ,) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
lowerCAmelCase = self.tokenizer
lowerCAmelCase = self.tokenizer(
text=SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,padding=SCREAMING_SNAKE_CASE_ ,truncation=SCREAMING_SNAKE_CASE_ ,max_length=SCREAMING_SNAKE_CASE_ ,stride=SCREAMING_SNAKE_CASE_ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE_ ,return_attention_mask=SCREAMING_SNAKE_CASE_ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE_ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE_ ,return_offsets_mapping=SCREAMING_SNAKE_CASE_ ,return_token_type_ids=SCREAMING_SNAKE_CASE_ ,return_length=SCREAMING_SNAKE_CASE_ ,verbose=SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
return text_encoding
# add pixel_values
lowerCAmelCase = self.image_processor(SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ )
if text is not None:
lowerCAmelCase = self.tokenizer(
text=SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,padding=SCREAMING_SNAKE_CASE_ ,truncation=SCREAMING_SNAKE_CASE_ ,max_length=SCREAMING_SNAKE_CASE_ ,stride=SCREAMING_SNAKE_CASE_ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE_ ,return_attention_mask=SCREAMING_SNAKE_CASE_ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE_ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE_ ,return_offsets_mapping=SCREAMING_SNAKE_CASE_ ,return_token_type_ids=SCREAMING_SNAKE_CASE_ ,return_length=SCREAMING_SNAKE_CASE_ ,verbose=SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
else:
lowerCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE_ )
return encoding_image_processor
def UpperCAmelCase (self : List[str] ,*SCREAMING_SNAKE_CASE_ : str ,**SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[Any] ,*SCREAMING_SNAKE_CASE_ : str ,**SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
@property
def UpperCAmelCase (self : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = self.tokenizer.model_input_names
lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 535
| 1
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
def __a ( self : Tuple , **_lowercase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
config.update(**UpperCAmelCase__ )
return config
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0](**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE__ = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE__ = self.dummy_sample
SCREAMING_SNAKE_CASE__ = 0.1 * sample
SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __a ( self : Tuple ):
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def __a ( self : Dict ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCAmelCase__ )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = 1
scheduler.set_timesteps(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCAmelCase__ ):
# 1. scale model input
SCREAMING_SNAKE_CASE__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = [1_06, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCAmelCase__ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE__ = len(UpperCAmelCase__ )
with self.assertRaises(UpperCAmelCase__ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__ )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase__ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
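# Illustrative sketch (not part of the test file): the multistep case above mirrors how a
# consistency-model sampler is typically driven. Assuming a `model(sample, t)` callable that
# returns the noise residual, a minimal sampling loop looks like:
#
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#     scheduler.set_timesteps(timesteps=[106, 0])          # custom timesteps, descending order
#     sample = initial_noise * scheduler.init_noise_sigma  # start from scaled Gaussian noise
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)
#         residual = model(scaled, t)
#         sample = scheduler.step(residual, t, sample).prev_sample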
| 721
|
def ugly_numbers(n: int) -> int:
    """
    Return the nth ugly number (a positive integer whose only prime factors
    are 2, 3 or 5; the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
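    # Illustrative check (not from the original file): the first ten ugly numbers
    # are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th is 12.
    assert ugly_numbers(10) == 12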
| 379
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # one extra entry for the start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
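# Illustrative sketch (not part of the module): constructing a tiny config and generating
# ONNX dummy inputs. `feature_extractor` is assumed to be an ImageGPT feature extractor
# instance; the kwargs below are only examples:
#
#     config = ImageGPTConfig(n_layer=2, n_head=4, n_embd=64)
#     onnx_config = ImageGPTOnnxConfig(config)
#     dummy = onnx_config.generate_dummy_inputs(feature_extractor, batch_size=2)
#     # dummy["input_ids"] has shape (batch, sequence): one color-cluster id per pixel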
| 63
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset

BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)
        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)
        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 236
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, crop_pct, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
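# Illustrative usage sketch (not part of the module). The resize logic above means a
# requested shortest_edge below 384 goes through a resize-then-center-crop path scaled
# by crop_pct, while 384 and above is warped directly to a square:
#
#     from PIL import Image
#     import numpy as np
#     processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#     image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
#     batch = processor(images=image, return_tensors="np")
#     # batch["pixel_values"].shape == (1, 3, 224, 224)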
| 165
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165
| 1
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
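    # Illustrative example (not from the original file): with equal value/weight
    # ratios the greedy order is stable, so items 0 and 1 fit whole and item 2
    # is taken fractionally: 10 + 20 + 30 * (1 / 3) = 40.0.
    max_value, fractions = fractional_knapsack([10, 20, 30], [1, 2, 3], capacity=4)
    print(f"{max_value = }, {fractions = }")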
| 541
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Weighted, undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        """Grow a minimum spanning tree from an arbitrary start vertex."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjaceny_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])
    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
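    # Illustrative check (not from the original file): in a weighted triangle the
    # MST keeps the two lightest edges, so the saving equals the heaviest edge.
    triangle = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    mst = triangle.prims_algorithm()
    assert sum(triangle.edges.values()) - sum(mst.edges.values()) == 3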
| 541
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save token lengths for each train/val example so samplers can batch dynamically."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
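# Illustrative invocation (not from the original file; the script name save_len_file.py
# is assumed). fire maps positional CLI arguments onto save_len_file's signature:
#
#     python save_len_file.py sshleifer/tiny-mbart examples/seq2seq/test_data/wmt_en_ro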
| 714
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 299
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """Pipeline for class-conditional image generation with a diffusion transformer (DiT)."""

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]):
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
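# Illustrative usage sketch (not part of the module); the checkpoint name is the one
# published for DiT but is an assumption here, and the label string must exist in
# the pipeline's id2label mapping:
#
#     import torch
#     from diffusers import DiTPipeline
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_labels=class_ids, generator=torch.manual_seed(0), num_inference_steps=25).images[0]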
| 16
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Build permutations depth-first, marking elements already used in index_used."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 16
| 1
|
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image using the standard contrast factor formula."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
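    # Worked example of the factor formula (illustrative): for level = 170,
    # factor = 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
    # so mid-gray (128) stays fixed while values away from 128 are pushed ~4.85x harder.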
| 702
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 0
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 16
|
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
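# Illustrative invocation (not from the original file; paths and script name are placeholders):
#
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#         --big_bird_config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/output \
#         --is_trivia_qa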
| 499
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 692
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = tempfile.mkdtemp()
snake_case: Optional[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
snake_case: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
snake_case: Optional[int] = {
'do_resize': True,
'size': {'height': 2_24, 'width': 2_24},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'do_convert_rgb': True,
}
snake_case: Union[str, Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case: Tuple = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_tokenizer()
snake_case: Union[str, Any] = self.get_rust_tokenizer()
snake_case: Union[str, Any] = self.get_image_processor()
snake_case: List[str] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case: List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
snake_case: Any = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
snake_case: Union[str, Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.get_image_processor()
snake_case: Tuple = self.get_tokenizer()
snake_case: Optional[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.prepare_image_inputs()
snake_case: List[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
snake_case: Dict = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.get_image_processor()
snake_case: Optional[int] = self.get_tokenizer()
snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = self.get_image_processor()
snake_case: Dict = self.get_tokenizer()
snake_case: Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Dict = 'Alexandra,T-shirt的价格是15便士。'
snake_case: Tuple = self.prepare_image_inputs()
snake_case: Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = self.get_image_processor()
snake_case: str = self.get_tokenizer()
snake_case: Union[str, Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case: int = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
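# --- Added usage sketch (not part of the original test suite). The checkpoint
# name below is an assumption (the public Chinese-CLIP base model on the Hub):
#
#   from PIL import Image
#   from transformers import ChineseCLIPProcessor
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text="一只猫", images=Image.open("cat.png"), return_tensors="pt")
#   # -> dict with input_ids, token_type_ids, attention_mask and pixel_values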
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice.numpy(), atol=1e-4))
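# The expected spatial size asserted in `create_and_check_model` above follows
# standard convolution arithmetic: floor((size + 2*padding - kernel)/stride) + 1,
# applied once per CvT stage. A minimal standalone sketch (added for
# illustration; reuses `floor` imported at the top of this file):
def _conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1


if __name__ == "__main__":
    # tester defaults above: kernels [7, 3, 3], strides [4, 2, 2], paddings [2, 1, 1]
    size = 64
    for k, s, p in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
        size = _conv_output_size(size, k, s, p)  # 64 -> 16 -> 8 -> 4
    print(size)  # 4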
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _UpperCAmelCase ( __A : "RagExampleArguments" , __A : "ProcessingArguments" , __A : "IndexHnswArguments" , ):
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
a_ : List[str] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
a_ : Dict = dataset.map(__A , batched=__A , num_proc=processing_args.num_proc )
# And compute the embeddings
a_ : str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__A )
a_ : Tuple = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
a_ : Union[str, Any] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
a_ : Union[str, Any] = dataset.map(
partial(__A , ctx_encoder=__A , ctx_tokenizer=__A ) , batched=__A , batch_size=processing_args.batch_size , features=__A , )
# And finally save your dataset
a_ : Tuple = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__A )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
a_ : Dict = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__A )
# And save the index
a_ : Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__A )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__lowerCAmelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
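# Illustrative follow-up (added; not part of the original script): once saved,
# the passages and index can be reloaded and queried with a DPR question
# encoder. The model names below are assumptions.
#
#   from datasets import load_from_disk
#   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
#   dataset = load_from_disk(passages_path)
#   dataset.load_faiss_index("embeddings", index_path)
#   q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   question = "What does Moses' rod turn into ?"
#   q_emb = q_enc(**q_tok(question, return_tensors="pt")).pooler_output.detach().numpy()[0]
#   scores, passages = dataset.get_nearest_examples("embeddings", q_emb, k=5)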
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to `1` (true) or `0` (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if `x` is a torch, TensorFlow, Jax or numpy tensor."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)
def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call without tensorflow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass, with a dict-like interface that ignores `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check if a given model can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase = "" , UpperCamelCase = "." ) -> Optional[int]:
'''simple docstring'''
def _flatten_dict(UpperCamelCase , UpperCamelCase="" , UpperCamelCase="." ):
for k, v in d.items():
lowerCAmelCase__ : Optional[int] = str(UpperCamelCase ) + delimiter + str(UpperCamelCase ) if parent_key else k
if v and isinstance(UpperCamelCase , UpperCamelCase ):
yield from flatten_dict(UpperCamelCase , UpperCamelCase , delimiter=UpperCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) )
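# Example (added for illustration):
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   -> {"a": 1, "b.c": 2, "b.d.e": 3}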
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` that works on torch/TensorFlow/Jax tensors and numpy arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
def tensor_size(array):
    """Framework-agnostic version of `numpy.size`: number of elements in `array`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert ! ("I like camembert!")

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop is present if the same node is visited twice while traversing the list."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
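# Alternative loop check (added for illustration): Floyd's tortoise-and-hare
# algorithm detects a cycle in O(n) time and O(1) extra space, whereas the
# visited-list scan in `__iter__` above is O(n^2) time and O(n) space.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False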
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid: maps every real input into (0, 1)."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """SiLU / swish activation: vector * sigmoid(vector)."""
    return vector * sigmoid(vector)
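# Note: the second function's original name was lost in this copy; `swish`
# (a.k.a. SiLU) is the conventional name for x * sigmoid(x).
# Examples (added for illustration):
#   sigmoid(np.array([0.0]))  -> array([0.5])
#   swish(np.array([0.0]))    -> array([0.])
#   swish(np.array([1.0]))    -> array([0.73105858])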
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            # NOTE: the exact dtype was lost in this copy; float16 is restored by assumption
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
snake_case = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
snake_case = parser.parse_args()
convert_tf_gptsan_to_pt(args)
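# Example invocation (added for illustration; the script filename and paths are
# placeholders):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan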
"""simple docstring"""
def snake_case ( lowerCAmelCase_ = 1000 ) -> int:
return sum(e for e in range(3 , lowerCAmelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"{solution() = }")
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
        """

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder, offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n "
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check whether the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
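    # Illustrative usage (a sketch, not part of the original test cases; the
    # matrix values are chosen to be strictly diagonally dominant):
    # coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    # constant = np.array([[2.0], [-6.0], [-4.0]])
    # print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))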
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    '''simple docstring'''
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_lowerCAmelCase = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_lowerCAmelCase = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_lowerCAmelCase = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    '''simple docstring'''
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    '''simple docstring'''
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    '''simple docstring'''
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("""Config""", """""") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("""Tokenizer"""):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("""TokenizerFast"""):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-1_3]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    '''simple docstring'''
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, """index.md"""), start_prompt="""<!--This table is updated automatically from the auto modules""", end_prompt="""<!-- End table-->""", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, """index.md"""), """w""", encoding="""utf-8""", newline="""\n""") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
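# Typical invocation, per the usage note at the top of this file (run from the
# root of the repo):
#   python utils/check_table.py --fix_and_overwrite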
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    '''simple docstring'''
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    '''simple docstring'''
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
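    # Illustrative round trip with the functions above:
    #   encode("hello") == "AABBBAABAAABABAABABAABBAB"
    #   decode("AABBBAABAAABABAABABAABBAB") == "hello"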
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
A = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self) -> None:
        '''simple docstring'''
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, 'schedulers/'))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/diffusers/schedulers/scheduling_ddpm.py'), os.path.join(self.diffusers_dir, 'schedulers/scheduling_ddpm.py'), )

    def tearDown(self) -> None:
        '''simple docstring'''
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        '''simple docstring'''
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119))
        fname = os.path.join(self.diffusers_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        '''simple docstring'''
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        '''simple docstring'''
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', re.sub('DDPM', 'Test', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""", f"""{long_class_name}SchedulerOutput""", re.sub('Bert', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', REFERENCE_CODE, overwrite_result=re.sub('DDPM', 'Test', REFERENCE_CODE), )
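# For reference, the marker these tests exercise looks like this in real code
# (the class and target names here are illustrative, not from the original file):
#
# # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->MyModel
# class MyModelSchedulerOutput(...):
#     ...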
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_special_tokens_as_you_expect(self):
        '''simple docstring'''
        pass
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterated over.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('''is_last''')
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('''is_last''')
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('''is_last''')
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
"""simple docstring"""
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
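# How the quine works: the format string is substituted into itself; `%%`
# yields a literal `%` and `%r` re-inserts the quoted string, so the printed
# text reproduces the source line exactly.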
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
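# Note on the candidate sequence above: consecutive cube differences satisfy
# (k + 1) ** 3 - k ** 3 == 3 * k * k + 3 * k + 1, and the gap between two
# successive differences is 6 * (k + 1). Starting at 7 and adding
# 6 * cube_index therefore enumerates 7, 19, 37, 61, ..., exactly the
# cube-difference candidates whose prime members are counted.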
if __name__ == "__main__":
print(F"{solution() = }")
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """simple docstring"""
    stem = fname.split(os.path.sep)[-1]
    return re.search(R"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    '''simple docstring'''

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    """simple docstring"""
if args.with_tracking:
SCREAMING_SNAKE_CASE : Dict = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Dict = config["lr"]
SCREAMING_SNAKE_CASE : List[str] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE : List[str] = config["image_size"]
if not isinstance(lowercase , (list, tuple) ):
SCREAMING_SNAKE_CASE : Any = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
SCREAMING_SNAKE_CASE : int = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
SCREAMING_SNAKE_CASE : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
SCREAMING_SNAKE_CASE : Tuple = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.split(lowercase )[-1].split("." )[0]
accelerator.init_trackers(lowercase , lowercase )
# Grab all the image filenames
SCREAMING_SNAKE_CASE : Union[str, Any] = [os.path.join(args.data_dir , lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
SCREAMING_SNAKE_CASE : List[str] = [extract_label(lowercase ) for fname in file_names]
SCREAMING_SNAKE_CASE : Dict = list(set(lowercase ) )
id_to_label.sort()
SCREAMING_SNAKE_CASE : str = {lbl: i for i, lbl in enumerate(lowercase )}
# Set the seed before splitting the data.
np.random.seed(lowercase )
torch.manual_seed(lowercase )
torch.cuda.manual_seed_all(lowercase )
# Split our filenames between train and validation
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.permutation(len(lowercase ) )
SCREAMING_SNAKE_CASE : List[Any] = int(0.8 * len(lowercase ) )
SCREAMING_SNAKE_CASE : int = random_perm[:cut]
SCREAMING_SNAKE_CASE : int = random_perm[cut:]
# For training we use a simple RandomResizedCrop
SCREAMING_SNAKE_CASE : List[str] = Compose([RandomResizedCrop(lowercase , scale=(0.5, 1.0) ), ToTensor()] )
SCREAMING_SNAKE_CASE : int = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase , label_to_id=lowercase )
# For evaluation, we use a deterministic Resize
SCREAMING_SNAKE_CASE : List[Any] = Compose([Resize(lowercase ), ToTensor()] )
SCREAMING_SNAKE_CASE : Union[str, Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase , label_to_id=lowercase )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Tuple = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
SCREAMING_SNAKE_CASE : Any = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : Union[str, Any] = create_model("resnet50d" , pretrained=lowercase , num_classes=len(lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : int = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
SCREAMING_SNAKE_CASE : int = False
for param in model.get_classifier().parameters():
SCREAMING_SNAKE_CASE : List[str] = True
# We normalize the batches of images to be a bit faster.
SCREAMING_SNAKE_CASE : str = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
SCREAMING_SNAKE_CASE : str = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : str = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
SCREAMING_SNAKE_CASE : str = OneCycleLR(optimizer=lowercase , max_lr=lowercase , epochs=lowercase , steps_per_epoch=len(lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE : Any = 0
# We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE : Dict = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
SCREAMING_SNAKE_CASE : List[str] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
SCREAMING_SNAKE_CASE : Optional[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
SCREAMING_SNAKE_CASE : Union[str, Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
SCREAMING_SNAKE_CASE : str = os.path.splitext(lowercase )[0]
if "epoch" in training_difference:
SCREAMING_SNAKE_CASE : List[Any] = int(training_difference.replace("epoch_" , "" ) ) + 1
SCREAMING_SNAKE_CASE : Dict = None
else:
SCREAMING_SNAKE_CASE : str = int(training_difference.replace("step_" , "" ) )
SCREAMING_SNAKE_CASE : Optional[Any] = resume_step // len(lowercase )
resume_step -= starting_epoch * len(lowercase )
# Now we train the model
for epoch in range(lowercase , lowercase ):
model.train()
if args.with_tracking:
SCREAMING_SNAKE_CASE : str = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
SCREAMING_SNAKE_CASE : List[Any] = accelerator.skip_first_batches(lowercase , lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
SCREAMING_SNAKE_CASE : Union[str, Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
SCREAMING_SNAKE_CASE : Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()}
SCREAMING_SNAKE_CASE : int = (batch["image"] - mean) / std
SCREAMING_SNAKE_CASE : Dict = model(lowercase )
SCREAMING_SNAKE_CASE : List[Any] = torch.nn.functional.cross_entropy(lowercase , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : str = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
SCREAMING_SNAKE_CASE : List[str] = os.path.join(args.output_dir , lowercase )
accelerator.save_state(lowercase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
SCREAMING_SNAKE_CASE : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
SCREAMING_SNAKE_CASE : Optional[int] = (batch["image"] - mean) / std
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(lowercase )
SCREAMING_SNAKE_CASE : Tuple = outputs.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["label"]) )
SCREAMING_SNAKE_CASE : Any = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
SCREAMING_SNAKE_CASE : Optional[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(lowercase ),
"epoch": epoch,
} , step=lowercase , )
if checkpointing_steps == "epoch":
SCREAMING_SNAKE_CASE : List[Any] = F'''epoch_{epoch}'''
if args.output_dir is not None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(args.output_dir , lowercase )
accelerator.save_state(lowercase )
if args.with_tracking:
accelerator.end_training()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
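# Typical invocation sketch (the path and option values are illustrative):
#   accelerate launch cv_example.py --data_dir /path/to/pet/images --with_tracking --checkpointing_steps epoch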
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["""vocab_file"""])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["""spm_file"""])
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<s>""")
        self.assertEqual(vocab_keys[1], """<pad>""")
        self.assertEqual(vocab_keys[-1], """j""")
        self.assertEqual(len(vocab_keys), 1_0_0_1)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_1)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6], )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""], )
@slow
def lowercase ( self : List[str] ):
# fmt: off
snake_case__ : Union[str, Any] = {"""input_ids""": [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    """simple docstring"""

    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'
    french_text = 'C\'est trop cool'
    spanish_text = 'Esto es genial'

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["""it"""], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["""de"""], 1_1)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 1_0_0_0_0)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = """fr"""
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = """fr"""
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = """es"""
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    """simple docstring"""
    output = ''
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =string.ascii_letters
__lowercase =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_lowerCAmelCase )] if c in letters else c for c in sequence )
def _A ( ):
"""simple docstring"""
from timeit import timeit
print('Running performance benchmarks...' )
__lowercase ='from string import printable ; from __main__ import atbash, atbash_slow'
print(f"""> atbash_slow(): {timeit('atbash_slow(printable)' , setup=_lowerCAmelCase )} seconds""" )
print(f"""> atbash(): {timeit('atbash(printable)' , setup=_lowerCAmelCase )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark()
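    # A quick self-check (editor's addition, not in the original script): Atbash is
    # an involution, so applying it twice must return the input unchanged.
    assert atbash("abc") == "zyx"
    assert atbash(atbash("with space")) == "with space"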
| 454
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
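

if __name__ == "__main__":
    # Hedged usage sketch (editor's addition, not part of the original test file):
    # the round-trip covered by the tests above, written out by hand.
    import tempfile

    info = DatasetInfo(description="demo", dataset_size=42)
    with tempfile.TemporaryDirectory() as tmp_dir:
        DatasetInfosDict({"default": info}).write_to_directory(tmp_dir)
        # write_to_directory stores the infos as YAML front matter in README.md
        print(open(os.path.join(tmp_dir, "README.md")).read())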
| 454
| 1
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels also works on subclasses that keep the parent's signature
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
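

if __name__ == "__main__":
    # Editor's addition, a hedged sketch: ContextManagers enters the given context
    # managers in order and exits them in reverse, so this prints
    # Bonjour! / Welcome! / hello / Bye! / Au revoir!
    with ContextManagers([context_fr(), context_en()]):
        print("hello")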
| 165
|
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # Normalize each row to unit length so the matmul below yields a pairwise
    # cosine-similarity matrix.
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        # FlaxCLIPVisionModule expects channels-last inputs
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 165
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
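

# A hedged sketch (editor's addition) of the behavior this shim provides:
# instantiating the deprecated class still works, but emits a FutureWarning
# pointing users at OwlViTImageProcessor.
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       OwlViTFeatureExtractor()
#       assert any(issubclass(w.category, FutureWarning) for w in caught)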
| 720
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # ESM vocabularies are whitespace-delimited, one residue per token
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
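

# Hedged usage sketch (editor's addition): this tokenizer splits on whitespace,
# so protein sequences are usually passed one residue per token. The checkpoint
# name is taken from the pretrained map above.
#
#   tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   tokenizer("M K T A Y I A K").input_ids  # [cls_id, <residue ids>, eos_id]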
| 132
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
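

if __name__ == "__main__":
    # Editor's addition, a hedged numpy sketch of what the zero-mean unit-variance
    # normalization tested above does to an arbitrarily scaled signal.
    x = np.random.rand(1000) * 65535
    normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
    print(normed.mean(), normed.var())  # ~0.0, ~1.0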
| 514
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
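
# Editor's addition, a hedged example of the CSV layout this script expects,
# inferred from the DictReader fields used above:
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,1386
#   bert-base-uncased,8,512,1786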
| 503
| 0
|
"""Reverse every word longer than four characters in a sentence."""


def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    >>> reverse_long_words("nohtyP is nohtyP")
    'Python is Python'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 320
|
'''RoBERTa configuration'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
    '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
    '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
    '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
    '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
    '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}


class RobertaConfig(PretrainedConfig):
    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
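

# Hedged usage sketch (editor's addition), assuming the classes above are
# re-exported by the top-level `transformers` package as usual:
#
#   config = RobertaConfig(num_hidden_layers=6)  # a smaller RoBERTa variant
#   assert config.model_type == "roberta"
#   assert config.vocab_size == 50_265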
| 320
| 1
|